Package gluon :: Module dal
[hide private]
[frames] | [no frames]

Source Code for Module gluon.dal

    1  #!/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name' 
  121  'google:datastore' # for google app engine datastore 
  122  'google:sql' # for google app engine with sql (mysql compatible) 
  123  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  124  'imap://user:password@server:port' # experimental 
  125  'mongodb://user:password@server:port/database' # experimental 
  126   
  127  For more info: 
  128  help(DAL) 
  129  help(Field) 
  130  """ 
  131   
  132  ################################################################################### 
  133  # this file only exposes DAL and Field 
  134  ################################################################################### 
  135   
  136  __all__ = ['DAL', 'Field'] 
  137   
  138  MAXCHARLENGTH = 2**15 # not quite but reasonable default max char length 
  139  DEFAULTLENGTH = {'string':512, 
  140                   'password':512, 
  141                   'upload':512, 
  142                   'text':2**15, 
  143                   'blob':2**31} 
  144  TIMINGSSIZE = 100 
  145  SPATIALLIBS = { 
  146      'Windows':'libspatialite', 
  147      'Linux':'libspatialite.so', 
  148      'Darwin':'libspatialite.dylib' 
  149      } 
  150  DEFAULT_URI = 'sqlite://dummy.db' 
  151   
  152  import re 
  153  import sys 
  154  import locale 
  155  import os 
  156  import types 
  157  import datetime 
  158  import threading 
  159  import time 
  160  import csv 
  161  import cgi 
  162  import copy 
  163  import socket 
  164  import logging 
  165  import base64 
  166  import shutil 
  167  import marshal 
  168  import decimal 
  169  import struct 
  170  import urllib 
  171  import hashlib 
  172  import uuid 
  173  import glob 
  174  import traceback 
  175  import platform 
  176   
  177  PYTHON_VERSION = sys.version_info[0] 
  178  if PYTHON_VERSION == 2: 
  179      import cPickle as pickle 
  180      import cStringIO as StringIO 
  181      import copy_reg as copyreg 
  182      hashlib_md5 = hashlib.md5 
  183      bytes, unicode = str, unicode 
  184  else: 
  185      import pickle 
  186      from io import StringIO as StringIO 
  187      import copyreg 
  188      long = int 
  189      hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8')) 
  190      bytes, unicode = bytes, str 
  191   
  192  CALLABLETYPES = (types.LambdaType, types.FunctionType, 
  193                   types.BuiltinFunctionType, 
  194                   types.MethodType, types.BuiltinMethodType) 
  195   
  196  TABLE_ARGS = set( 
  197      ('migrate','primarykey','fake_migrate','format','redefine', 
  198       'singular','plural','trigger_name','sequence_name', 
  199       'common_filter','polymodel','table_class','on_define','actual_name')) 
  200   
  201  SELECT_ARGS = set( 
  202      ('orderby', 'groupby', 'limitby','required', 'cache', 'left', 
  203       'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby')) 
  204   
  205  ogetattr = object.__getattribute__ 
  206  osetattr = object.__setattr__ 
  207  exists = os.path.exists 
  208  pjoin = os.path.join 
  209   
  210  ################################################################################### 
  211  # following checks allow the use of dal without web2py, as a standalone module 
  212  ################################################################################### 
  213  try: 
  214      from utils import web2py_uuid 
  215  except (ImportError, SystemError): 
  216      import uuid 
217 - def web2py_uuid(): return str(uuid.uuid4())
218 219 try: 220 import portalocker 221 have_portalocker = True 222 except ImportError: 223 have_portalocker = False 224 225 try: 226 import serializers 227 have_serializers = True 228 except ImportError: 229 have_serializers = False 230 try: 231 import json as simplejson 232 except ImportError: 233 try: 234 import gluon.contrib.simplejson as simplejson 235 except ImportError: 236 simplejson = None 237 238 try: 239 import validators 240 have_validators = True 241 except (ImportError, SyntaxError): 242 have_validators = False 243 244 LOGGER = logging.getLogger("web2py.dal") 245 DEFAULT = lambda:0 246 247 GLOBAL_LOCKER = threading.RLock() 248 THREAD_LOCAL = threading.local() 249 250 # internal representation of tables with field 251 # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] 252 253 REGEX_TYPE = re.compile('^([\w\_\:]+)') 254 REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*') 255 REGEX_W = re.compile('^\w+$') 256 REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$') 257 REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$') 258 REGEX_CLEANUP_FN = re.compile('[\'"\s;]+') 259 REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)') 260 REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 261 REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)") 262 REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')') 263 REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 264 REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$') 265 REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$') 266 REGEX_QUOTES = re.compile("'[^']*'") 267 REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$') 268 REGEX_PASSWORD = re.compile('\://([^:@]*)\:') 269 REGEX_NOPASSWD = 
re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)' 270 271 # list of drivers will be built on the fly 272 # and lists only what is available 273 DRIVERS = [] 274 275 try: 276 from new import classobj 277 from google.appengine.ext import db as gae 278 from google.appengine.api import namespace_manager, rdbms 279 from google.appengine.api.datastore_types import Key ### for belongs on ID 280 from google.appengine.ext.db.polymodel import PolyModel 281 DRIVERS.append('google') 282 except ImportError: 283 pass 284 285 if not 'google' in DRIVERS: 286 287 try: 288 from pysqlite2 import dbapi2 as sqlite2 289 DRIVERS.append('SQLite(sqlite2)') 290 except ImportError: 291 LOGGER.debug('no SQLite drivers pysqlite2.dbapi2') 292 293 try: 294 from sqlite3 import dbapi2 as sqlite3 295 DRIVERS.append('SQLite(sqlite3)') 296 except ImportError: 297 LOGGER.debug('no SQLite drivers sqlite3') 298 299 try: 300 # first try contrib driver, then from site-packages (if installed) 301 try: 302 import contrib.pymysql as pymysql 303 # monkeypatch pymysql because they havent fixed the bug: 304 # https://github.com/petehunt/PyMySQL/issues/86 305 pymysql.ESCAPE_REGEX = re.compile("'") 306 pymysql.ESCAPE_MAP = {"'": "''"} 307 # end monkeypatch 308 except ImportError: 309 import pymysql 310 DRIVERS.append('MySQL(pymysql)') 311 except ImportError: 312 LOGGER.debug('no MySQL driver pymysql') 313 314 try: 315 import MySQLdb 316 DRIVERS.append('MySQL(MySQLdb)') 317 except ImportError: 318 LOGGER.debug('no MySQL driver MySQLDB') 319 320 321 try: 322 import psycopg2 323 from psycopg2.extensions import adapt as psycopg2_adapt 324 DRIVERS.append('PostgreSQL(psycopg2)') 325 except ImportError: 326 LOGGER.debug('no PostgreSQL driver psycopg2') 327 328 try: 329 # first try contrib driver, then from site-packages (if installed) 330 try: 331 import contrib.pg8000.dbapi as pg8000 332 except ImportError: 333 import pg8000.dbapi as pg8000 334 DRIVERS.append('PostgreSQL(pg8000)') 335 
except ImportError: 336 LOGGER.debug('no PostgreSQL driver pg8000') 337 338 try: 339 import cx_Oracle 340 DRIVERS.append('Oracle(cx_Oracle)') 341 except ImportError: 342 LOGGER.debug('no Oracle driver cx_Oracle') 343 344 try: 345 try: 346 import pyodbc 347 except ImportError: 348 try: 349 import contrib.pypyodbc as pyodbc 350 except Exception, e: 351 raise ImportError(str(e)) 352 DRIVERS.append('MSSQL(pyodbc)') 353 DRIVERS.append('DB2(pyodbc)') 354 DRIVERS.append('Teradata(pyodbc)') 355 DRIVERS.append('Ingres(pyodbc)') 356 except ImportError: 357 LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc') 358 359 try: 360 import Sybase 361 DRIVERS.append('Sybase(Sybase)') 362 except ImportError: 363 LOGGER.debug('no Sybase driver') 364 365 try: 366 import kinterbasdb 367 DRIVERS.append('Interbase(kinterbasdb)') 368 DRIVERS.append('Firebird(kinterbasdb)') 369 except ImportError: 370 LOGGER.debug('no Firebird/Interbase driver kinterbasdb') 371 372 try: 373 import fdb 374 DRIVERS.append('Firebird(fdb)') 375 except ImportError: 376 LOGGER.debug('no Firebird driver fdb') 377 ##### 378 try: 379 import firebirdsql 380 DRIVERS.append('Firebird(firebirdsql)') 381 except ImportError: 382 LOGGER.debug('no Firebird driver firebirdsql') 383 384 try: 385 import informixdb 386 DRIVERS.append('Informix(informixdb)') 387 LOGGER.warning('Informix support is experimental') 388 except ImportError: 389 LOGGER.debug('no Informix driver informixdb') 390 391 try: 392 import sapdb 393 DRIVERS.append('SQL(sapdb)') 394 LOGGER.warning('SAPDB support is experimental') 395 except ImportError: 396 LOGGER.debug('no SAP driver sapdb') 397 398 try: 399 import cubriddb 400 DRIVERS.append('Cubrid(cubriddb)') 401 LOGGER.warning('Cubrid support is experimental') 402 except ImportError: 403 LOGGER.debug('no Cubrid driver cubriddb') 404 405 try: 406 from com.ziclix.python.sql import zxJDBC 407 import java.sql 408 # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/ 409 from org.sqlite import 
JDBC # required by java.sql; ensure we have it 410 zxJDBC_sqlite = java.sql.DriverManager 411 DRIVERS.append('PostgreSQL(zxJDBC)') 412 DRIVERS.append('SQLite(zxJDBC)') 413 LOGGER.warning('zxJDBC support is experimental') 414 is_jdbc = True 415 except ImportError: 416 LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC') 417 is_jdbc = False 418 419 try: 420 import couchdb 421 DRIVERS.append('CouchDB(couchdb)') 422 except ImportError: 423 LOGGER.debug('no Couchdb driver couchdb') 424 425 try: 426 import pymongo 427 DRIVERS.append('MongoDB(pymongo)') 428 except: 429 LOGGER.debug('no MongoDB driver pymongo') 430 431 try: 432 import imaplib 433 DRIVERS.append('IMAP(imaplib)') 434 except: 435 LOGGER.debug('no IMAP driver imaplib') 436 437 PLURALIZE_RULES = [ 438 (re.compile('child$'), re.compile('child$'), 'children'), 439 (re.compile('oot$'), re.compile('oot$'), 'eet'), 440 (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), 441 (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), 442 (re.compile('sis$'), re.compile('sis$'), 'ses'), 443 (re.compile('man$'), re.compile('man$'), 'men'), 444 (re.compile('ife$'), re.compile('ife$'), 'ives'), 445 (re.compile('eau$'), re.compile('eau$'), 'eaux'), 446 (re.compile('lf$'), re.compile('lf$'), 'lves'), 447 (re.compile('[sxz]$'), re.compile('$'), 'es'), 448 (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), 449 (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), 450 (re.compile('$'), re.compile('$'), 's'), 451 ]
def pluralize(singular, rules=PLURALIZE_RULES):
    """Return the plural of *singular* using the first matching rule.

    Each rule is a (search_regex, sub_regex, replacement) triple; the
    default table ends with a catch-all that appends 's'.  Returns None
    only when no rule produces a truthy result.
    """
    for re_search, re_sub, replacement in rules:
        candidate = re_search.search(singular) and re_sub.sub(replacement, singular)
        if candidate:
            return candidate
458
def hide_password(uri):
    """Mask the password portion of a DAL URI with '******' for safe logging.

    Accepts a single URI string or a list/tuple of them (processed
    element-wise).
    """
    if not isinstance(uri, (list, tuple)):
        return REGEX_NOPASSWD.sub('******', uri)
    return [hide_password(item) for item in uri]
463
def OR(a,b):
    """Binary helper returning ``a | b`` (e.g. to fold DAL Queries together)."""
    combined = a | b
    return combined
466
def AND(a,b):
    """Binary helper returning ``a & b`` (e.g. to fold DAL Queries together)."""
    combined = a & b
    return combined
469
470 -def IDENTITY(x): return x
471
def varquote_aux(name,quotestr='%s'):
    """Quote *name* with *quotestr* unless it is a plain ``\\w+`` identifier,
    which needs no quoting."""
    if REGEX_W.match(name):
        return name
    return quotestr % name
474
def quote_keyword(a,keyword='timestamp'):
    """Rewrite occurrences of the literal text '.keyword' (when followed by a
    word character) as '."<keyword>"'.

    NOTE(review): the pattern matches the literal string 'keyword', not the
    value of the *keyword* argument -- behavior preserved from the original;
    confirm against callers whether interpolation was intended.
    """
    pattern = re.compile(r'\.keyword(?=\w)')
    return pattern.sub('."%s"' % keyword, a)
479 480 if 'google' in DRIVERS: 481 482 is_jdbc = False
class GAEDecimalProperty(gae.Property):
    """
    GAE decimal implementation

    Stores decimal.Decimal values in the datastore as strings and
    quantizes them to *scale* decimal places when read back.
    """
    data_type = decimal.Decimal

    def __init__(self, precision, scale, **kwargs):
        # BUGFIX: the original called
        #   super(GAEDecimalProperty, self).__init__(self, **kwargs)
        # passing the instance itself as the first positional argument,
        # where gae.Property.__init__ would take it as verbose_name.
        # Only the keyword arguments should be forwarded.
        super(GAEDecimalProperty, self).__init__(**kwargs)
        # Quantization template, e.g. scale=2 -> Decimal('1.00').
        # *precision* mirrors SQL decimal(precision, scale) but is not
        # enforced by the datastore.
        self.round = decimal.Decimal('1.' + '0' * scale)

    def get_value_for_datastore(self, model_instance):
        """Serialize the Decimal as a string for storage; None/'' map to None."""
        value = super(GAEDecimalProperty, self)\
            .get_value_for_datastore(model_instance)
        if value is None or value == '':
            return None
        else:
            return str(value)

    def make_value_from_datastore(self, value):
        """Rebuild a Decimal from the stored string, quantized to the declared scale."""
        if value is None or value == '':
            return None
        else:
            return decimal.Decimal(value).quantize(self.round)

    def validate(self, value):
        """Accept Decimal or string values (strings are coerced to Decimal);
        raise gae.BadValueError for anything else."""
        value = super(GAEDecimalProperty, self).validate(value)
        if value is None or isinstance(value, decimal.Decimal):
            return value
        elif isinstance(value, basestring):
            return decimal.Decimal(value)
        raise gae.BadValueError("Property %s must be a Decimal or string."\
                                % self.name)
519
###################################################################################
# class that handles connection pooling (all adapters are derived from this one)
###################################################################################

class ConnectionPool(object):
    """Mixin giving adapters thread-shared connection pooling.

    POOLS maps connection URI -> list of idle DB-API connections; access
    to it is serialized with the module-level GLOBAL_LOCKER.
    """

    POOLS = {}
    # when True, a recycled pooled connection is probed with 'SELECT 1;'
    # before being handed out again
    check_active_connection = True

    @staticmethod
    def set_folder(folder):
        # remember the working folder for the current thread
        THREAD_LOCAL.folder = folder

    # ## this allows gluon to commit/rollback all dbs in this thread

    def close(self,action='commit',really=True):
        """Finish the unit of work (*action* is a method name like 'commit'
        or a callable receiving self), then recycle the connection into the
        pool if there is room, otherwise really close it."""
        if action:
            if callable(action):
                action(self)
            else:
                getattr(self, action)()
        # ## if you want pools, recycle this connection
        if self.pool_size:
            GLOBAL_LOCKER.acquire()
            pool = ConnectionPool.POOLS[self.uri]
            if len(pool) < self.pool_size:
                # returned to the pool: do not physically close it
                pool.append(self.connection)
                really = False
            GLOBAL_LOCKER.release()
        if really:
            self.close_connection()
        self.connection = None

    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment """
        dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
        for db_uid, db_group in dbs:
            for db in db_group:
                if hasattr(db,'_adapter'):
                    db._adapter.close(action)
        getattr(THREAD_LOCAL,'db_instances',{}).clear()
        getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
        if callable(action):
            action(None)
        return

    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        self.folder = getattr(THREAD_LOCAL,'folder','')

        # Creating the folder if it does not exist
        # (NOTE: the 'False and' guard deliberately keeps this branch
        # unreachable -- folder creation is disabled here)
        if False and self.folder and not exists(self.folder):
            os.mkdir(self.folder)

    def after_connection_hook(self):
        """hook for the after_connection parameter"""
        if callable(self._after_connection):
            self._after_connection(self)
        self.after_connection()

    def after_connection(self):
        """ this it is supposed to be overloaded by adapters"""
        pass

    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        if getattr(self,'connection', None) != None:
            return
        if f is None:
            # self.connector is expected to be set by the concrete adapter
            f = self.connector

        if not hasattr(self, "driver") or self.driver is None:
            LOGGER.debug("Skipping connection since there's no driver")
            return

        if not self.pool_size:
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # try a recycled connection; if the liveness probe
                    # fails it is silently discarded and the loop retries
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        if self.cursor and self.check_active_connection:
                            self.execute('SELECT 1;')
                        break
                    except:
                        pass
                else:
                    # pool empty: make a brand-new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
628
629 630 ################################################################################### 631 # this is a generic adapter that does nothing; all others are derived from this one 632 ################################################################################### 633 634 -class BaseAdapter(ConnectionPool):
635 native_json = False 636 driver = None 637 driver_name = None 638 drivers = () # list of drivers from which to pick 639 connection = None 640 maxcharlength = MAXCHARLENGTH 641 commit_on_alter_table = False 642 support_distributed_transaction = False 643 uploads_in_blob = False 644 can_select_for_update = True 645 646 TRUE = 'T' 647 FALSE = 'F' 648 T_SEP = ' ' 649 types = { 650 'boolean': 'CHAR(1)', 651 'string': 'CHAR(%(length)s)', 652 'text': 'TEXT', 653 'json': 'TEXT', 654 'password': 'CHAR(%(length)s)', 655 'blob': 'BLOB', 656 'upload': 'CHAR(%(length)s)', 657 'integer': 'INTEGER', 658 'bigint': 'INTEGER', 659 'float':'DOUBLE', 660 'double': 'DOUBLE', 661 'decimal': 'DOUBLE', 662 'date': 'DATE', 663 'time': 'TIME', 664 'datetime': 'TIMESTAMP', 665 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', 666 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 667 'list:integer': 'TEXT', 668 'list:string': 'TEXT', 669 'list:reference': 'TEXT', 670 # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference' 671 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT', 672 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 673 } 674
    def id_query(self, table):
        """Return a Query matching every row of *table*.

        NOTE: ``!= None`` is deliberate -- Field.__ne__ is overloaded to
        build a SQL "id IS NOT NULL" Query object, so this must NOT be
        rewritten as ``is not None``.
        """
        return table._id != None
677
678 - def adapt(self, obj):
679 return "'%s'" % obj.replace("'", "''")
680
681 - def smart_adapt(self, obj):
682 if isinstance(obj,(int,float)): 683 return str(obj) 684 return self.adapt(str(obj))
685
686 - def integrity_error(self):
687 return self.driver.IntegrityError
688
689 - def operational_error(self):
690 return self.driver.OperationalError
691
692 - def file_exists(self, filename):
693 """ 694 to be used ONLY for files that on GAE may not be on filesystem 695 """ 696 return exists(filename)
697
698 - def file_open(self, filename, mode='rb', lock=True):
699 """ 700 to be used ONLY for files that on GAE may not be on filesystem 701 """ 702 if have_portalocker and lock: 703 fileobj = portalocker.LockedFile(filename,mode) 704 else: 705 fileobj = open(filename,mode) 706 return fileobj
707
708 - def file_close(self, fileobj):
709 """ 710 to be used ONLY for files that on GAE may not be on filesystem 711 """ 712 if fileobj: 713 fileobj.close()
714
715 - def file_delete(self, filename):
716 os.unlink(filename)
717
718 - def find_driver(self,adapter_args,uri=None):
719 if getattr(self,'driver',None) != None: 720 return 721 drivers_available = [driver for driver in self.drivers 722 if driver in globals()] 723 if uri: 724 items = uri.split('://',1)[0].split(':') 725 request_driver = items[1] if len(items)>1 else None 726 else: 727 request_driver = None 728 request_driver = request_driver or adapter_args.get('driver') 729 if request_driver: 730 if request_driver in drivers_available: 731 self.driver_name = request_driver 732 self.driver = globals().get(request_driver) 733 else: 734 raise RuntimeError("driver %s not available" % request_driver) 735 elif drivers_available: 736 self.driver_name = drivers_available[0] 737 self.driver = globals().get(self.driver_name) 738 else: 739 raise RuntimeError("no driver available %s" % str(self.drivers))
740 741
742 - def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8', 743 credential_decoder=IDENTITY, driver_args={}, 744 adapter_args={},do_connect=True, after_connection=None):
745 self.db = db 746 self.dbengine = "None" 747 self.uri = uri 748 self.pool_size = pool_size 749 self.folder = folder 750 self.db_codec = db_codec 751 self._after_connection = after_connection 752 class Dummy(object): 753 lastrowid = 1 754 def __getattr__(self, value): 755 return lambda *a, **b: []
756 self.connection = Dummy() 757 self.cursor = Dummy() 758
759 - def sequence_name(self,tablename):
760 return '%s_sequence' % tablename
761
762 - def trigger_name(self,tablename):
763 return '%s_sequence' % tablename
764
    def varquote(self,name):
        """Quote an identifier for the backend.

        The base adapter performs no quoting and returns *name* unchanged;
        backends that need quoting override this (cf. the module-level
        varquote_aux helper).
        """
        return name
767
    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        """Build the CREATE TABLE statement for *table* and, when migration
        is enabled, execute it and record the table metadata.

        Returns the generated SQL string.  When migrate is falsy the SQL is
        returned without touching the database.  Otherwise the field
        metadata is pickled into a ``*.table`` file (table._dbt) and, if
        that file already exists, migrate_table() is invoked to reconcile
        differences.  All DDL activity is appended to sql.log.

        polymodel is accepted for GAE-adapter compatibility and unused here.
        """
        db = table._db
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        # sql_fields drives migrations; sql_fields_aux drives CREATE TABLE
        # (see the default-value caveat further down)
        sql_fields = {}
        sql_fields_aux = {}
        # table-level foreign keys: rtablename -> {rfieldname: field_name}
        TFK = {}
        tablename = table._tablename
        sortable = 0
        types = self.types
        for field in table:
            sortable += 1
            field_name = field.name
            field_type = field.type
            if isinstance(field_type,SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith('reference'):
                # 'reference <table>[.<field>]'; '.' means self-reference
                referenced = field_type[10:].strip()
                if referenced == '.':
                    referenced = tablename
                constraint_name = self.constraint_name(tablename, field_name)
                if not '.' in referenced \
                        and referenced != tablename \
                        and hasattr(table,'_primarykey'):
                    ftype = types['integer']
                else:
                    if hasattr(table,'_primarykey'):
                        rtablename,rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                        # must be PK reference or unique
                        if rfieldname in rtable._primarykey or \
                                rfield.unique:
                            ftype = types[rfield.type[:9]] % \
                                dict(length=rfield.length)
                            # multicolumn primary key reference?
                            if not rfield.unique and len(rtable._primarykey)>1:
                                # then it has to be a table level FK
                                if rtablename not in TFK:
                                    TFK[rtablename] = {}
                                TFK[rtablename][rfieldname] = field_name
                            else:
                                ftype = ftype + \
                                    types['reference FK'] % dict(
                                    constraint_name = constraint_name, # should be quoted
                                    foreign_key = '%s (%s)' % (rtablename,
                                                               rfieldname),
                                    table_name = tablename,
                                    field_name = field_name,
                                    on_delete_action=field.ondelete)
                    else:
                        # make a guess here for circular references
                        if referenced in db:
                            id_fieldname = db[referenced]._id.name
                        elif referenced == tablename:
                            id_fieldname = table._id.name
                        else: #make a guess
                            id_fieldname = 'id'
                        ftype = types[field_type[:9]] % dict(
                            index_name = field_name+'__idx',
                            field_name = field_name,
                            constraint_name = constraint_name,
                            foreign_key = '%s (%s)' % (referenced,
                                                       id_fieldname),
                            on_delete_action=field.ondelete)
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                # 'decimal(p,s)' -> precision/scale substituted into the type
                precision, scale = map(int,field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif field_type.startswith('geo'):
                if not hasattr(self,'srid'):
                    raise RuntimeError('Adapter does not support geometry')
                srid = self.srid
                geotype, parms = field_type[:-1].split('(')
                if not geotype in types:
                    raise SyntaxError(
                        'Field: unknown field type: %s for %s' \
                        % (field_type, field_name))
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    # parameters: schema, srid, dimension
                    dimension = 2 # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=tablename,
                                         fieldname=field_name, srid=srid,
                                         dimension=dimension)
                    # executed after CREATE TABLE, not inlined in it
                    postcreation_fields.append(ftype)
            elif not field_type in types:
                raise SyntaxError('Field: unknown field type: %s for %s' % \
                    (field_type, field_name))
            else:
                ftype = types[field_type]\
                    % dict(length=field.length)
            if not field_type.startswith('id') and \
                    not field_type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier

            # add to list of fields
            sql_fields[field_name] = dict(
                length=field.length,
                unique=field.unique,
                notnull=field.notnull,
                sortable=sortable,
                type=str(field_type),
                sql=ftype)

            if field.notnull and not field.default is None:
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.NOT_NULL(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and \
                    field_type.startswith('geom')):
                fields.append('%s %s' % (field_name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table._id.name)
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n    '.join(fields)
        # append table-level (multicolumn) foreign keys collected above
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = db[rtablename]._primarykey
            fkeys = [ rfields[k] for k in pkeys ]
            fields = fields + ',\n    ' + \
                types['reference TFK'] % dict(
                table_name = tablename,
                field_name=', '.join(fkeys),
                foreign_table = rtablename,
                foreign_key = ', '.join(pkeys),
                on_delete_action = field.ondelete)

        if getattr(table,'_primarykey',None):
            query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
                (tablename, fields,
                 self.PRIMARY_KEY(', '.join(table._primarykey)),other)
        else:
            query = "CREATE TABLE %s(\n    %s\n)%s" % \
                (tablename, fields, other)

        # for file-backed sqlite/spatialite the metadata lives next to the db
        if self.uri.startswith('sqlite:///') \
                or self.uri.startswith('spatialite:///'):
            path_encoding = sys.getfilesystemencoding() \
                or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')]\
                .decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory')\
                or self.uri.startswith('spatialite:memory'):
            # in-memory databases keep no migration metadata
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(
                dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))

        if table._dbt:
            table._loggername = pjoin(dbpath, 'sql.log')
            logfile = self.file_open(table._loggername, 'a')
        else:
            logfile = None
        if not table._dbt or not self.file_exists(table._dbt):
            # first creation (or no metadata file): run the DDL
            if table._dbt:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                logfile.write(query + '\n')
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.execute(query)
                    table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    logfile.write('faked!\n')
                else:
                    logfile.write('success!\n')
        else:
            # metadata exists: diff against it and migrate if needed
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = pickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                self.file_close(logfile)
                raise RuntimeError('File %s appears corrupted' % table._dbt)
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, logfile,
                                   fake_migrate=fake_migrate)
        self.file_close(logfile)
        return query
1001
    def migrate_table(
        self,
        table,
        sql_fields,
        sql_fields_old,
        sql_fields_aux,
        logfile,
        fake_migrate=False,
        ):
        """
        Diff the new field definitions (sql_fields) against the ones recorded
        in the .table metadata file (sql_fields_old) and emit/execute the
        ALTER TABLE statements needed to reconcile them.

        :param table: the Table being migrated
        :param sql_fields: dict fieldname -> dict(type=..., sql=...) for the
            desired schema
        :param sql_fields_old: same shape, loaded from the .table file
        :param sql_fields_aux: same shape, with backend-tweaked SQL used for
            the actual ALTER statements
        :param logfile: open file object for sql.log
        :param fake_migrate: when True, only log and update metadata, do not
            execute any SQL
        """
        db = table._db
        db._migrated.append(table._tablename)
        tablename = table._tablename
        # normalize legacy metadata entries (plain SQL strings) into dicts;
        # note 'unkown' is a historical literal kept for metadata compatibility
        def fix(item):
            k,v=item
            if not isinstance(v,dict):
                v=dict(type='unkown',sql=v)
            return k.lower(),v
        # make sure all field names are lower case to avoid
        # spurious migrations caused by a mere case change
        sql_fields = dict(map(fix,sql_fields.iteritems()))
        sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
        sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
        if db._debug:
            logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))

        # union of new and old field names, preserving new-field order
        keys = sql_fields.keys()
        for key in sql_fields_old:
            if not key in keys:
                keys.append(key)
        new_add = self.concat_add(tablename)

        metadata_change = False
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            query = None
            if not key in sql_fields_old:
                # field added
                sql_fields_current[key] = sql_fields[key]
                if self.dbengine in ('postgres',) and \
                   sql_fields[key]['type'].startswith('geometry'):
                    # 'sql' == ftype in sql (PostGIS AddGeometryColumn call)
                    query = [ sql_fields[key]['sql'] ]
                else:
                    query = ['ALTER TABLE %s ADD %s %s;' % \
                         (tablename, key,
                          sql_fields_aux[key]['sql'].replace(', ', new_add))]
                metadata_change = True
            elif self.dbengine in ('sqlite', 'spatialite'):
                # sqlite cannot alter columns: only update the metadata
                if key in sql_fields:
                    sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            elif not key in sql_fields:
                # field removed
                del sql_fields_current[key]
                ftype = sql_fields_old[key]['type']
                if self.dbengine in ('postgres',) \
                   and ftype.startswith('geometry'):
                    geotype, parms = ftype[:-1].split('(')
                    schema = parms.split(',')[0]
                    query = [ "SELECT DropGeometryColumn ('%(schema)s', '%(table)s', '%(field)s');" % dict(schema=schema, table=tablename, field=key,) ]
                elif not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP COLUMN %s;'
                             % (tablename, key)]
                else:
                    query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
                metadata_change = True
            elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
                  and not (key in table.fields and
                           isinstance(table[key].type, SQLCustomType)) \
                  and not sql_fields[key]['type'].startswith('reference')\
                  and not sql_fields[key]['type'].startswith('double')\
                  and not sql_fields[key]['type'].startswith('id'):
                # field type changed: add a __tmp column, copy data over,
                # recreate the column, copy back, drop the __tmp column
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
                else:
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
                metadata_change = True
            elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
                # only the logical type changed (same SQL): metadata only
                sql_fields_current[key] = sql_fields[key]
                metadata_change = True

            if query:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                db['_lastsql'] = '\n'.join(query)
                for sub_query in query:
                    logfile.write(sub_query + '\n')
                    if not fake_migrate:
                        self.execute(sub_query)
                        # Caveat: mysql, oracle and firebird do not allow multiple alter table
                        # in one transaction so we must commit partial transactions and
                        # update table._dbt after alter table.
                        if db._adapter.commit_on_alter_table:
                            db.commit()
                            tfile = self.file_open(table._dbt, 'w')
                            pickle.dump(sql_fields_current, tfile)
                            self.file_close(tfile)
                            logfile.write('success!\n')
                    else:
                        logfile.write('faked!\n')
            elif metadata_change:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields_current, tfile)
                self.file_close(tfile)

        # final commit + metadata write unless already done per-statement above
        if metadata_change and \
           not (query and self.dbengine in ('mysql','oracle','firebird')):
            db.commit()
            tfile = self.file_open(table._dbt, 'w')
            pickle.dump(sql_fields_current, tfile)
            self.file_close(tfile)
1125 - def LOWER(self, first):
1126 return 'LOWER(%s)' % self.expand(first)
1127
1128 - def UPPER(self, first):
1129 return 'UPPER(%s)' % self.expand(first)
1130
1131 - def COUNT(self, first, distinct=None):
1132 return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \ 1133 % self.expand(first)
1134
1135 - def EXTRACT(self, first, what):
1136 return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
1137
1138 - def EPOCH(self, first):
1139 return self.EXTRACT(first, 'epoch')
1140
1141 - def LENGTH(self, first):
1142 return "LENGTH(%s)" % self.expand(first)
1143
1144 - def AGGREGATE(self, first, what):
1145 return "%s(%s)" % (what, self.expand(first))
1146
1147 - def JOIN(self):
1148 return 'JOIN'
1149
1150 - def LEFT_JOIN(self):
1151 return 'LEFT JOIN'
1152
1153 - def RANDOM(self):
1154 return 'Random()'
1155
1156 - def NOT_NULL(self, default, field_type):
1157 return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
1158
1159 - def COALESCE(self, first, second):
1160 expressions = [self.expand(first)]+[self.expand(e) for e in second] 1161 return 'COALESCE(%s)' % ','.join(expressions)
1162
1163 - def COALESCE_ZERO(self, first):
1164 return 'COALESCE(%s,0)' % self.expand(first)
1165
1166 - def RAW(self, first):
1167 return first
1168
1169 - def ALLOW_NULL(self):
1170 return ''
1171
1172 - def SUBSTRING(self, field, parameters):
1173 return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
1174
1175 - def PRIMARY_KEY(self, key):
1176 return 'PRIMARY KEY(%s)' % key
1177
1178 - def _drop(self, table, mode):
1179 return ['DROP TABLE %s;' % table]
1180
    def drop(self, table, mode=''):
        """
        Drop *table* from the database and unregister it from the DAL.

        Executes the statements from _drop(), commits, removes the table
        from db and db.tables, clears references to it, and deletes the
        migration metadata file. When migrations are enabled (table._dbt
        is set) each statement is also appended to the migration log.

        NOTE(review): the logfile opened here is never explicitly closed;
        presumably file_open/file_close lifetime is managed elsewhere — confirm.
        """
        db = table._db
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        queries = self._drop(table, mode)
        for query in queries:
            if table._dbt:
                logfile.write(query + '\n')
            self.execute(query)
        db.commit()
        del db[table._tablename]
        del db.tables[db.tables.index(table._tablename)]
        db._remove_references_to(table)
        if table._dbt:
            self.file_delete(table._dbt)
            logfile.write('success!\n')
1197
1198 - def _insert(self, table, fields):
1199 if fields: 1200 keys = ','.join(f.name for f, v in fields) 1201 values = ','.join(self.expand(v, f.type) for f, v in fields) 1202 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values) 1203 else: 1204 return self._insert_empty(table) 1205
1206 - def _insert_empty(self, table):
1207 return 'INSERT INTO %s DEFAULT VALUES;' % table
1208
    def insert(self, table, fields):
        """
        Execute an INSERT and return the new row id.

        Returns None on an integrity error (e.g. duplicate key), a dict of
        primary-key values for keyed tables, the raw lastrowid when it is
        not an int, otherwise a Reference wrapping the new id.
        """
        query = self._insert(table,fields)
        try:
            self.execute(query)
        except Exception:
            e = sys.exc_info()[1]
            # integrity violations are reported as a None return, not raised
            if isinstance(e,self.integrity_error_class()):
                return None
            raise e
        if hasattr(table,'_primarykey'):
            return dict([(k[0].name, k[1]) for k in fields \
                             if k[0].name in table._primarykey])
        id = self.lastrowid(table)
        if not isinstance(id,int):
            return id
        # wrap the id in a lazy Reference bound to the table
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid
1227
1228 - def bulk_insert(self, table, items):
1229 return [self.insert(table,item) for item in items]
1230
1231 - def NOT(self, first):
1232 return '(NOT %s)' % self.expand(first)
1233
1234 - def AND(self, first, second):
1235 return '(%s AND %s)' % (self.expand(first), self.expand(second))
1236
1237 - def OR(self, first, second):
1238 return '(%s OR %s)' % (self.expand(first), self.expand(second))
1239
1240 - def BELONGS(self, first, second):
1241 if isinstance(second, str): 1242 return '(%s IN (%s))' % (self.expand(first), second[:-1]) 1243 elif not second: 1244 return '(1=0)' 1245 items = ','.join(self.expand(item, first.type) for item in second) 1246 return '(%s IN (%s))' % (self.expand(first), items)
1247
1248 - def REGEXP(self, first, second):
1249 "regular expression operator" 1250 raise NotImplementedError
1251
1252 - def LIKE(self, first, second):
1253 "case sensitive like operator" 1254 raise NotImplementedError
1255
1256 - def ILIKE(self, first, second):
1257 "case in-sensitive like operator" 1258 return '(%s LIKE %s)' % (self.expand(first), 1259 self.expand(second, 'string'))
1260
1261 - def STARTSWITH(self, first, second):
1262 return '(%s LIKE %s)' % (self.expand(first), 1263 self.expand(second+'%', 'string'))
1264
1265 - def ENDSWITH(self, first, second):
1266 return '(%s LIKE %s)' % (self.expand(first), 1267 self.expand('%'+second, 'string'))
1268
    def CONTAINS(self,first,second,case_sensitive=False):
        """
        Render a substring / list-membership test as a LIKE pattern.

        For string/text/json fields: '%' + escaped(second) + '%'.
        For list: fields: the value is wrapped in '|' delimiters ('%|v|%')
        matching the bar-encoded storage format; '%' and '|' in the needle
        are escaped first.
        """
        if first.type in ('string','text', 'json'):
            second = Expression(None,self.CONCAT('%',Expression(
                        None,self.REPLACE(second,('%','%%'))),'%'))
        elif first.type.startswith('list:'):
            second = Expression(None,self.CONCAT('%|',Expression(None,self.REPLACE(
                            Expression(None,self.REPLACE(second,('%','%%'))),('|','||'))),'|%'))
        # LIKE is case sensitive, ILIKE is not
        op = case_sensitive and self.LIKE or self.ILIKE
        return op(first,second)
1278
1279 - def EQ(self, first, second=None):
1280 if second is None: 1281 return '(%s IS NULL)' % self.expand(first) 1282 return '(%s = %s)' % (self.expand(first), 1283 self.expand(second, first.type))
1284
1285 - def NE(self, first, second=None):
1286 if second is None: 1287 return '(%s IS NOT NULL)' % self.expand(first) 1288 return '(%s <> %s)' % (self.expand(first), 1289 self.expand(second, first.type))
1290
1291 - def LT(self,first,second=None):
1292 if second is None: 1293 raise RuntimeError("Cannot compare %s < None" % first) 1294 return '(%s < %s)' % (self.expand(first), 1295 self.expand(second,first.type))
1296
1297 - def LE(self,first,second=None):
1298 if second is None: 1299 raise RuntimeError("Cannot compare %s <= None" % first) 1300 return '(%s <= %s)' % (self.expand(first), 1301 self.expand(second,first.type))
1302
1303 - def GT(self,first,second=None):
1304 if second is None: 1305 raise RuntimeError("Cannot compare %s > None" % first) 1306 return '(%s > %s)' % (self.expand(first), 1307 self.expand(second,first.type))
1308
1309 - def GE(self,first,second=None):
1310 if second is None: 1311 raise RuntimeError("Cannot compare %s >= None" % first) 1312 return '(%s >= %s)' % (self.expand(first), 1313 self.expand(second,first.type))
1314
1315 - def is_numerical_type(self, ftype):
1316 return ftype in ('integer','boolean','double','bigint') or \ 1317 ftype.startswith('decimal')
1318
    def REPLACE(self, first, (second, third)):
        """
        Render REPLACE(haystack, needle, replacement); all three arguments
        are expanded as strings. Uses Python-2-only tuple parameter
        unpacking in the signature, so the pair must stay a single argument.
        """
        return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'),
                                      self.expand(second,'string'),
                                      self.expand(third,'string'))
1323
1324 - def CONCAT(self, *items):
1325 return '(%s)' % ' || '.join(self.expand(x,'string') for x in items)
1326
1327 - def ADD(self, first, second):
1328 if self.is_numerical_type(first.type): 1329 return '(%s + %s)' % (self.expand(first), 1330 self.expand(second, first.type)) 1331 else: 1332 return self.CONCAT(first, second)
1333
1334 - def SUB(self, first, second):
1335 return '(%s - %s)' % (self.expand(first), 1336 self.expand(second, first.type))
1337
1338 - def MUL(self, first, second):
1339 return '(%s * %s)' % (self.expand(first), 1340 self.expand(second, first.type))
1341
1342 - def DIV(self, first, second):
1343 return '(%s / %s)' % (self.expand(first), 1344 self.expand(second, first.type))
1345
1346 - def MOD(self, first, second):
1347 return '(%s %% %s)' % (self.expand(first), 1348 self.expand(second, first.type))
1349
1350 - def AS(self, first, second):
1351 return '%s AS %s' % (self.expand(first), second)
1352
1353 - def ON(self, first, second):
1354 if use_common_filters(second): 1355 second = self.common_filter(second,[first._tablename]) 1356 return '%s ON %s' % (self.expand(first), self.expand(second))
1357
1358 - def INVERT(self, first):
1359 return '%s DESC' % self.expand(first)
1360
1361 - def COMMA(self, first, second):
1362 return '%s, %s' % (self.expand(first), self.expand(second))
1363
    def expand(self, expression, field_type=None):
        """
        Recursively render a Field/Expression/Query (or plain value) to SQL.

        Fields render as table.name; Expression/Query dispatch to their op
        with first/second; plain values are represented according to
        field_type; lists join their represented items with commas; bools
        become '1'/'0'; anything else falls back to str().
        """
        if isinstance(expression, Field):
            return '%s.%s' % (expression.tablename, expression.name)
        elif isinstance(expression, (Expression, Query)):
            first = expression.first
            second = expression.second
            op = expression.op
            optional_args = expression.optional_args or {}
            if not second is None:
                return op(first, second, **optional_args)
            elif not first is None:
                return op(first,**optional_args)
            elif isinstance(op, str):
                # raw SQL string as op: strip any trailing ';' and parenthesize
                if op.endswith(';'):
                    op=op[:-1]
                return '(%s)' % op
            else:
                return op()
        elif field_type:
            return str(self.represent(expression,field_type))
        elif isinstance(expression,(list,tuple)):
            return ','.join(self.represent(item,field_type) \
                                for item in expression)
        elif isinstance(expression, bool):
            return '1' if expression else '0'
        else:
            return str(expression)
1391
1392 - def table_alias(self,name):
1393 return str(name if isinstance(name,Table) else self.db[name])
1394
1395 - def alias(self, table, alias):
1396 """ 1397 Given a table object, makes a new table object 1398 with alias name. 1399 """ 1400 other = copy.copy(table) 1401 other['_ot'] = other._ot or other._tablename 1402 other['ALL'] = SQLALL(other) 1403 other['_tablename'] = alias 1404 for fieldname in other.fields: 1405 other[fieldname] = copy.copy(other[fieldname]) 1406 other[fieldname]._tablename = alias 1407 other[fieldname].tablename = alias 1408 other[fieldname].table = other 1409 table._db[alias] = other 1410 return other
1411
1412 - def _truncate(self, table, mode=''):
1413 tablename = table._tablename 1414 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1415
    def truncate(self, table, mode= ' '):
        """
        Empty *table*, logging each statement to the migration log when
        migrations are enabled; otherwise writes go to a no-op stub so the
        code path stays identical. Commits on success; the log is always
        closed via the finally clause.
        """
        # Prepare a logfile object; a do-nothing stand-in when table._dbt is unset
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        else:
            class Logfile(object):
                def write(self, value):
                    pass
                def close(self):
                    pass
            logfile = Logfile()

        try:
            queries = table._db._adapter._truncate(table, mode)
            for query in queries:
                logfile.write(query + '\n')
                self.execute(query)
            table._db.commit()
            logfile.write('success!\n')
        finally:
            logfile.close()
1438 - def _update(self, tablename, query, fields):
1439 if query: 1440 if use_common_filters(query): 1441 query = self.common_filter(query, [tablename]) 1442 sql_w = ' WHERE ' + self.expand(query) 1443 else: 1444 sql_w = '' 1445 sql_v = ','.join(['%s=%s' % (field.name, 1446 self.expand(value, field.type)) \ 1447 for (field, value) in fields]) 1448 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1449
1450 - def update(self, tablename, query, fields):
1451 sql = self._update(tablename, query, fields) 1452 self.execute(sql) 1453 try: 1454 return self.cursor.rowcount 1455 except: 1456 return None
1457
1458 - def _delete(self, tablename, query):
1459 if query: 1460 if use_common_filters(query): 1461 query = self.common_filter(query, [tablename]) 1462 sql_w = ' WHERE ' + self.expand(query) 1463 else: 1464 sql_w = '' 1465 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1466
1467 - def delete(self, tablename, query):
1468 sql = self._delete(tablename, query) 1469 ### special code to handle CASCADE in SQLite & SpatiaLite 1470 db = self.db 1471 table = db[tablename] 1472 if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by: 1473 deleted = [x[table._id.name] for x in db(query).select(table._id)] 1474 ### end special code to handle CASCADE in SQLite & SpatiaLite 1475 self.execute(sql) 1476 try: 1477 counter = self.cursor.rowcount 1478 except: 1479 counter = None 1480 ### special code to handle CASCADE in SQLite & SpatiaLite 1481 if self.dbengine in ('sqlite', 'spatialite') and counter: 1482 for field in table._referenced_by: 1483 if field.type=='reference '+table._tablename \ 1484 and field.ondelete=='CASCADE': 1485 db(field.belongs(deleted)).delete() 1486 ### end special code to handle CASCADE in SQLite & SpatiaLite 1487 return counter
1488
1489 - def get_table(self, query):
1490 tablenames = self.tables(query) 1491 if len(tablenames)==1: 1492 return tablenames[0] 1493 elif len(tablenames)<1: 1494 raise RuntimeError("No table selected") 1495 else: 1496 raise RuntimeError("Too many tables selected")
1497
    def expand_all(self, fields, tablenames):
        """
        Normalize a select field list: SQLALL expands to all fields of its
        table, 'table.field' strings resolve to Field objects, other strings
        become literal Expressions, everything else passes through. An empty
        list defaults to every field of every table in *tablenames*.
        """
        db = self.db
        new_fields = []
        append = new_fields.append
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            elif isinstance(item,str):
                if REGEX_TABLE_DOT_FIELD.match(item):
                    tablename,fieldname = item.split('.')
                    append(db[tablename][fieldname])
                else:
                    # arbitrary string: wrap as a constant expression
                    append(Expression(db,lambda item=item:item))
            else:
                append(item)
        # ## if no fields specified take them all from the requested tables
        if not new_fields:
            for table in tablenames:
                for field in db[table]:
                    append(field)
        return new_fields
1519
    def _select(self, query, fields, attributes):
        """
        Build the full SELECT statement for *query*/*fields* honoring the
        select attributes (join, left, distinct, groupby, having, orderby,
        limitby, for_update, ...). Returns the SQL string; also caches the
        expanded column names on self._colnames for the row parser.
        """
        tables = self.tables
        # reject unknown select attributes early
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        args_get = attributes.get
        tablenames = tables(query)
        tablenames_for_common_filters = tablenames
        # collect tables referenced by the selected fields as well
        for field in fields:
            if isinstance(field, basestring) \
                    and REGEX_TABLE_DOT_FIELD.match(field):
                tn,fn = field.split('.')
                field = self.db[tn][fn]
            for tablename in tables(field):
                if not tablename in tablenames:
                    tablenames.append(tablename)

        if len(tablenames) < 1:
            raise SyntaxError('Set: no tables selected')
        self._colnames = map(self.expand, fields)
        # geometry columns are selected as WKT via ST_AsText
        def geoexpand(field):
            if isinstance(field.type,str) and field.type.startswith('geometry'):
                field = field.st_astext()
            return self.expand(field)
        sql_f = ', '.join(map(geoexpand, fields))
        sql_o = ''
        sql_s = ''
        left = args_get('left', False)
        inner_join = args_get('join', False)
        distinct = args_get('distinct', False)
        groupby = args_get('groupby', False)
        orderby = args_get('orderby', False)
        having = args_get('having', False)
        limitby = args_get('limitby', False)
        orderby_on_limitby = args_get('orderby_on_limitby', True)
        for_update = args_get('for_update', False)
        if self.can_select_for_update is False and for_update is True:
            raise SyntaxError('invalid select attribute: for_update')
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            sql_s += 'DISTINCT ON (%s)' % distinct
        # split inner-join arguments into plain tables and ON expressions
        if inner_join:
            icommand = self.JOIN()
            if not isinstance(inner_join, (tuple, list)):
                inner_join = [inner_join]
            ijoint = [t._tablename for t in inner_join
                      if not isinstance(t,Expression)]
            ijoinon = [t for t in inner_join if isinstance(t, Expression)]
            itables_to_merge={} #issue 490
            [itables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in ijoinon]
            ijoinont = [t.first._tablename for t in ijoinon]
            [itables_to_merge.pop(t) for t in ijoinont
             if t in itables_to_merge] #issue 490
            iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
            iexcluded = [t for t in tablenames
                         if not t in iimportant_tablenames]
        # split left-join arguments the same way
        if left:
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join
                     if not isinstance(t, Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            #patch join+left patch (solves problem with ordering in left joins)
            tables_to_merge={}
            [tables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            tablenames_for_common_filters = [t for t in tablenames
                                             if not t in joinont ]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames
                        if not t in important_tablenames ]
        else:
            excluded = tablenames

        if use_common_filters(query):
            query = self.common_filter(query,tablenames_for_common_filters)
        sql_w = ' WHERE ' + self.expand(query) if query else ''

        # assemble the FROM clause depending on which join kinds are present
        if inner_join and not left:
            sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \
                                   itables_to_merge.keys()])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, t)
        elif not inner_join and left:
            sql_t = ', '.join([self.table_alias(t) for t in excluded + \
                                   tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command,
                                     ','.join([self.table_alias(t) for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, t)
        elif inner_join and left:
            all_tables_in_query = set(important_tablenames + \
                                      iimportant_tablenames + \
                                      tablenames)
            tables_in_joinon = set(joinont + ijoinont)
            tables_not_in_joinon = \
                all_tables_in_query.difference(tables_in_joinon)
            sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, t)
            if joint:
                sql_t += ' %s %s' % (command,
                                     ','.join([self.table_alias(t) for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, t)
        else:
            sql_t = ', '.join(self.table_alias(t) for t in tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
            if having:
                sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if limitby:
            # deterministic paging: default to ordering by the primary key(s)
            if orderby_on_limitby and not orderby and tablenames:
                sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])])
            # oracle does not support limitby
        sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
        if for_update and self.can_select_for_update is True:
            sql = sql.rstrip(';') + ' FOR UPDATE;'
        return sql
1655 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1656 if limitby: 1657 (lmin, lmax) = limitby 1658 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 1659 return 'SELECT %s %s FROM %s%s%s;' % \ 1660 (sql_s, sql_f, sql_t, sql_w, sql_o)
1661
1662 - def _fetchall(self):
1663 return self.cursor.fetchall()
1664
    def _select_aux(self,sql,fields,attributes):
        """
        Execute a built SELECT (optionally through the row-level cache),
        apply the offset part of limitby client-side, and hand the raw rows
        to the processor (self.parse by default) to build a Rows object.
        """
        args_get = attributes.get
        cache = args_get('cache',None)
        if not cache:
            self.execute(sql)
            rows = self._fetchall()
        else:
            (cache_model, time_expire) = cache
            # cache key derived from connection uri + sql; hashed when too long
            key = self.uri + '/' + sql + '/rows'
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            def _select_aux2():
                self.execute(sql)
                return self._fetchall()
            rows = cache_model(key,_select_aux2,time_expire)
        if isinstance(rows,tuple):
            rows = list(rows)
        # backends that cannot do OFFSET natively slice here
        limitby = args_get('limitby', None) or (0,)
        rows = self.rowslice(rows,limitby[0],None)
        processor = args_get('processor',self.parse)
        cacheable = args_get('cacheable',False)
        return processor(rows,fields,self._colnames,cacheable=cacheable)
    def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.

        When both 'cache' and 'cacheable' attributes are given, the whole
        processed Rows object is cached under a key derived from the uri
        and the SQL; otherwise only _select_aux's behavior applies.
        """
        sql = self._select(query, fields, attributes)
        cache = attributes.get('cache', None)
        if cache and attributes.get('cacheable',False):
            del attributes['cache']
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            args = (sql,fields,attributes)
            return cache_model(
                key,
                lambda self=self,args=args:self._select_aux(*args),
                time_expire)
        else:
            return self._select_aux(sql,fields,attributes)
1705
1706 - def _count(self, query, distinct=None):
1707 tablenames = self.tables(query) 1708 if query: 1709 if use_common_filters(query): 1710 query = self.common_filter(query, tablenames) 1711 sql_w = ' WHERE ' + self.expand(query) 1712 else: 1713 sql_w = '' 1714 sql_t = ','.join(self.table_alias(t) for t in tablenames) 1715 if distinct: 1716 if isinstance(distinct,(list, tuple)): 1717 distinct = xorify(distinct) 1718 sql_d = self.expand(distinct) 1719 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1720 (sql_d, sql_t, sql_w) 1721 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1722
1723 - def count(self, query, distinct=None):
1724 self.execute(self._count(query, distinct)) 1725 return self.cursor.fetchone()[0]
1726
1727 - def tables(self, *queries):
1728 tables = set() 1729 for query in queries: 1730 if isinstance(query, Field): 1731 tables.add(query.tablename) 1732 elif isinstance(query, (Expression, Query)): 1733 if not query.first is None: 1734 tables = tables.union(self.tables(query.first)) 1735 if not query.second is None: 1736 tables = tables.union(self.tables(query.second)) 1737 return list(tables)
1738
1739 - def commit(self):
1740 if self.connection: return self.connection.commit()
1741
1742 - def rollback(self):
1743 if self.connection: return self.connection.rollback()
1744
1745 - def close_connection(self):
1746 if self.connection: return self.connection.close()
1747
1748 - def distributed_transaction_begin(self, key):
1749 return
1750
1751 - def prepare(self, key):
1752 if self.connection: self.connection.prepare()
1753
1754 - def commit_prepared(self, key):
1755 if self.connection: self.connection.commit()
1756
1757 - def rollback_prepared(self, key):
1758 if self.connection: self.connection.rollback()
1759
1760 - def concat_add(self, tablename):
1761 return ', ADD '
1762
1763 - def constraint_name(self, table, fieldname):
1764 return '%s_%s__constraint' % (table,fieldname)
1765
1766 - def create_sequence_and_triggers(self, query, table, **args):
1767 self.execute(query)
1768
    def log_execute(self, *a, **b):
        """
        Run a command on the cursor while recording it: optionally filter
        the SQL (filter_sql_command hook), debug-log it, store it as
        db._lastsql, and append (command, elapsed-seconds) to db._timings,
        which is trimmed to the last TIMINGSSIZE entries.
        """
        if not self.connection: return None
        command = a[0]
        if hasattr(self,'filter_sql_command'):
            command = self.filter_sql_command(command)
        if self.db._debug:
            LOGGER.debug('SQL: %s' % command)
        self.db._lastsql = command
        t0 = time.time()
        ret = self.cursor.execute(command, *a[1:], **b)
        self.db._timings.append((command,time.time()-t0))
        # keep only the most recent TIMINGSSIZE timings
        del self.db._timings[:-TIMINGSSIZE]
        return ret
1782
1783 - def execute(self, *a, **b):
1784 return self.log_execute(*a, **b)
1785
1786 - def represent(self, obj, fieldtype):
1787 field_is_type = fieldtype.startswith 1788 if isinstance(obj, CALLABLETYPES): 1789 obj = obj() 1790 if isinstance(fieldtype, SQLCustomType): 1791 value = fieldtype.encoder(obj) 1792 if fieldtype.type in ('string','text', 'json'): 1793 return self.adapt(value) 1794 return value 1795 if isinstance(obj, (Expression, Field)): 1796 return str(obj) 1797 if field_is_type('list:'): 1798 if not obj: 1799 obj = [] 1800 elif not isinstance(obj, (list, tuple)): 1801 obj = [obj] 1802 if field_is_type('list:string'): 1803 obj = map(str,obj) 1804 else: 1805 obj = map(int,obj) 1806 # we don't want to bar_encode json objects 1807 if isinstance(obj, (list, tuple)) and (not fieldtype == "json"): 1808 obj = bar_encode(obj) 1809 if obj is None: 1810 return 'NULL' 1811 if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']: 1812 return 'NULL' 1813 r = self.represent_exceptions(obj, fieldtype) 1814 if not r is None: 1815 return r 1816 if fieldtype == 'boolean': 1817 if obj and not str(obj)[:1].upper() in '0F': 1818 return self.smart_adapt(self.TRUE) 1819 else: 1820 return self.smart_adapt(self.FALSE) 1821 if fieldtype == 'id' or fieldtype == 'integer': 1822 return str(long(obj)) 1823 if field_is_type('decimal'): 1824 return str(obj) 1825 elif field_is_type('reference'): # reference 1826 if fieldtype.find('.')>0: 1827 return repr(obj) 1828 elif isinstance(obj, (Row, Reference)): 1829 return str(obj['id']) 1830 return str(long(obj)) 1831 elif fieldtype == 'double': 1832 return repr(float(obj)) 1833 if isinstance(obj, unicode): 1834 obj = obj.encode(self.db_codec) 1835 if fieldtype == 'blob': 1836 obj = base64.b64encode(str(obj)) 1837 elif fieldtype == 'date': 1838 if isinstance(obj, (datetime.date, datetime.datetime)): 1839 obj = obj.isoformat()[:10] 1840 else: 1841 obj = str(obj) 1842 elif fieldtype == 'datetime': 1843 if isinstance(obj, datetime.datetime): 1844 obj = obj.isoformat(self.T_SEP)[:19] 1845 elif isinstance(obj, datetime.date): 1846 obj = 
obj.isoformat()[:10]+' 00:00:00' 1847 else: 1848 obj = str(obj) 1849 elif fieldtype == 'time': 1850 if isinstance(obj, datetime.time): 1851 obj = obj.isoformat()[:10] 1852 else: 1853 obj = str(obj) 1854 elif fieldtype == 'json': 1855 if not self.native_json: 1856 if have_serializers: 1857 obj = serializers.json(obj) 1858 elif simplejson: 1859 obj = simplejson.dumps(items) 1860 else: 1861 raise RuntimeError("missing simplejson") 1862 if not isinstance(obj,bytes): 1863 obj = bytes(obj) 1864 try: 1865 obj.decode(self.db_codec) 1866 except: 1867 obj = obj.decode('latin1').encode(self.db_codec) 1868 return self.adapt(obj)
1869
1870 - def represent_exceptions(self, obj, fieldtype):
1871 return None
1872
1873 - def lastrowid(self, table):
1874 return None
1875
1876 - def integrity_error_class(self):
1877 return type(None)
1878
1879 - def rowslice(self, rows, minimum=0, maximum=None):
1880 """ 1881 By default this function does nothing; 1882 overload when db does not do slicing. 1883 """ 1884 return rows
1885
    def parse_value(self, value, field_type, blob_decode=True):
        """
        Convert a raw database value into the Python type for *field_type*.

        Non-blob byte strings are decoded with the db codec (best effort),
        unicode is normalized to utf-8 bytes, SQLCustomType decoders run
        first, and plain string-like / geometry / undecoded-blob values pass
        through; everything else dispatches via self.parsemap keyed on the
        base type name.
        """
        if field_type != 'blob' and isinstance(value, str):
            try:
                value = value.decode(self.db._db_codec)
            except Exception:
                pass
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        if isinstance(field_type, SQLCustomType):
            value = field_type.decoder(value)
        if not isinstance(field_type, str) or value is None:
            return value
        elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
            return value
        elif field_type.startswith('geo'):
            return value
        elif field_type == 'blob' and not blob_decode:
            return value
        else:
            # dispatch on the leading type word, e.g. 'decimal' of 'decimal(10,2)'
            key = REGEX_TYPE.match(field_type).group(0)
            return self.parsemap[key](value,field_type)
1907
1908 - def parse_reference(self, value, field_type):
1909 referee = field_type[10:].strip() 1910 if not '.' in referee: 1911 value = Reference(value) 1912 value._table, value._record = self.db[referee], None 1913 return value
1914
1915 - def parse_boolean(self, value, field_type):
1916 return value == self.TRUE or str(value)[:1].lower() == 't'
1917
1918 - def parse_date(self, value, field_type):
1919 if isinstance(value, datetime.datetime): 1920 return value.date() 1921 if not isinstance(value, (datetime.date,datetime.datetime)): 1922 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 1923 value = datetime.date(y, m, d) 1924 return value
1925
1926 - def parse_time(self, value, field_type):
1927 if not isinstance(value, datetime.time): 1928 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 1929 if len(time_items) == 3: 1930 (h, mi, s) = time_items 1931 else: 1932 (h, mi, s) = time_items + [0] 1933 value = datetime.time(h, mi, s) 1934 return value
1935
    def parse_datetime(self, value, field_type):
        """Coerce *value* to a ``datetime.datetime``.

        Parses 'YYYY-MM-DD[ HH:MM:SS[.ms][+HH:MM|-HH:MM]]' strings.
        NOTE(review): a trailing offset is *added to* (for '+') or
        *subtracted from* (for '-') the naive result rather than stripped —
        confirm callers expect this normalization direction.
        """
        if not isinstance(value, datetime.datetime):
            value = str(value)
            # fixed-position split: date, time, then optional ms + tz offset
            date_part, time_part, timezone = value[:10], value[11:19], value[19:]
            if '+' in timezone:
                ms, tz = timezone.split('+')
                h, m = tz.split(':')
                dt = datetime.timedelta(seconds=3600 * int(h) + 60 * int(m))
            elif '-' in timezone:
                ms, tz = timezone.split('-')
                h, m = tz.split(':')
                dt = -datetime.timedelta(seconds=3600 * int(h) + 60 * int(m))
            else:
                dt = None
            (y, m, d) = map(int, date_part.split('-'))
            # missing time components default to 0
            time_parts = time_part and time_part.split(':')[:3] or (0, 0, 0)
            while len(time_parts) < 3: time_parts.append(0)
            time_items = map(int, time_parts)
            (h, mi, s) = time_items
            value = datetime.datetime(y, m, d, h, mi, s)
            if dt:
                value = value + dt
        return value
1959
1960 - def parse_blob(self, value, field_type):
1961 return base64.b64decode(str(value))
1962
1963 - def parse_decimal(self, value, field_type):
1964 decimals = int(field_type[8:-1].split(',')[-1]) 1965 if self.dbengine in ('sqlite', 'spatialite'): 1966 value = ('%.' + str(decimals) + 'f') % value 1967 if not isinstance(value, decimal.Decimal): 1968 value = decimal.Decimal(str(value)) 1969 return value
1970
1971 - def parse_list_integers(self, value, field_type):
1972 if not isinstance(self, NoSQLAdapter): 1973 value = bar_decode_integer(value) 1974 return value
1975
1976 - def parse_list_references(self, value, field_type):
1977 if not isinstance(self, NoSQLAdapter): 1978 value = bar_decode_integer(value) 1979 return [self.parse_reference(r, field_type[5:]) for r in value]
1980
1981 - def parse_list_strings(self, value, field_type):
1982 if not isinstance(self, NoSQLAdapter): 1983 value = bar_decode_string(value) 1984 return value
1985
    def parse_id(self, value, field_type):
        """Normalize a primary-key value to a (Python 2) long integer."""
        return long(value)
1988
    def parse_integer(self, value, field_type):
        """Normalize an integer/bigint column value to a (Python 2) long."""
        return long(value)
1991
1992 - def parse_double(self, value, field_type):
1993 return float(value)
1994
    def parse_json(self, value, field_type):
        """Deserialize a json column value.

        When the backend has a native JSON type (``self.native_json``) the
        driver already returns a parsed value and this is a no-op.

        Raises RuntimeError when the stored value is not a string or when
        no JSON decoder is available.
        """
        if not self.native_json:
            if not isinstance(value, basestring):
                raise RuntimeError('json data not a string')
            if isinstance(value, unicode):
                value = value.encode('utf-8')
            # prefer the bundled serializers; fall back to simplejson
            if have_serializers:
                value = serializers.loads_json(value)
            elif simplejson:
                value = simplejson.loads(value)
            else:
                raise RuntimeError("missing simplejson")
        return value
2008
    def build_parsemap(self):
        """Build the field-type -> parser dispatch table used by parse_value.

        Keys are the leading words matched by REGEX_TYPE against field type
        strings (e.g. 'decimal(10,2)' dispatches on 'decimal').
        """
        self.parsemap = {
            'id': self.parse_id,
            'integer': self.parse_integer,
            'bigint': self.parse_integer,
            'float': self.parse_double,
            'double': self.parse_double,
            'reference': self.parse_reference,
            'boolean': self.parse_boolean,
            'date': self.parse_date,
            'time': self.parse_time,
            'datetime': self.parse_datetime,
            'blob': self.parse_blob,
            'decimal': self.parse_decimal,
            'json': self.parse_json,
            'list:integer': self.parse_list_integers,
            'list:reference': self.parse_list_references,
            'list:string': self.parse_list_strings,
        }
2028
    def parse(self, rows, fields, colnames, blob_decode=True,
              cacheable=False):
        """Convert raw cursor *rows* into a ``Rows`` object.

        For each 'table.field' column the value is parsed via parse_value
        and stored in a per-table Row; other columns land in '_extra'.
        Unless *cacheable*, each record of a table with an 'id' column gets
        update_record/delete_record helpers and lazy back-reference sets.
        Virtual and lazy fields of every selected table are then attached.
        """
        self.build_parsemap()
        db = self.db
        virtualtables = []
        new_rows = []
        # precompute per-column metadata so the row loop stays cheap
        tmps = []
        for colname in colnames:
            if not REGEX_TABLE_DOT_FIELD.match(colname):
                tmps.append(None)
            else:
                (tablename, fieldname) = colname.split('.')
                table = db[tablename]
                field = table[fieldname]
                ft = field.type
                tmps.append((tablename, fieldname, table, field, ft))
        for (i, row) in enumerate(rows):
            new_row = Row()
            for (j, colname) in enumerate(colnames):
                value = row[j]
                tmp = tmps[j]
                if tmp:
                    (tablename, fieldname, table, field, ft) = tmp
                    if tablename in new_row:
                        colset = new_row[tablename]
                    else:
                        colset = new_row[tablename] = Row()
                        if tablename not in virtualtables:
                            virtualtables.append(tablename)
                    value = self.parse_value(value, ft, blob_decode)
                    if field.filter_out:
                        value = field.filter_out(value)
                    colset[fieldname] = value

                    # for backward compatibility
                    if ft == 'id' and fieldname != 'id' and \
                            not 'id' in table.fields:
                        colset['id'] = value

                    if ft == 'id' and not cacheable:
                        # temporary hack to deal with
                        # GoogleDatastoreAdapter
                        # references
                        if isinstance(self, GoogleDatastoreAdapter):
                            id = value.key().id_or_name()
                            colset[fieldname] = id
                            colset.gae_item = value
                        else:
                            id = value
                        colset.update_record = RecordUpdater(colset, table, id)
                        colset.delete_record = RecordDeleter(table, id)
                        # expose referencing tables as lazy sets
                        for rfield in table._referenced_by:
                            referee_link = db._referee_name and \
                                db._referee_name % dict(
                                    table=rfield.tablename, field=rfield.name)
                            if referee_link and not referee_link in colset:
                                colset[referee_link] = LazySet(rfield, id)
                else:
                    # expression / aliased column: keep it under '_extra'
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colname] = \
                        self.parse_value(value,
                                         fields[j].type, blob_decode)
                    new_column_name = \
                        REGEX_SELECT_AS_PARSER.search(colname)
                    if not new_column_name is None:
                        column_name = new_column_name.groups(0)
                        setattr(new_row, column_name[0], value)
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)

        for tablename in virtualtables:
            ### new style virtual fields
            table = db[tablename]
            fields_virtual = [(f, v) for (f, v) in table.iteritems()
                              if isinstance(v, FieldVirtual)]
            fields_lazy = [(f, v) for (f, v) in table.iteritems()
                           if isinstance(v, FieldMethod)]
            if fields_virtual or fields_lazy:
                for row in rowsobj.records:
                    box = row[tablename]
                    for f, v in fields_virtual:
                        box[f] = v.f(row)
                    for f, v in fields_lazy:
                        box[f] = (v.handler or VirtualCommand)(v.f, row)

            ### old style virtual fields
            for item in table.virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename: item})
                except (KeyError, AttributeError):
                    # to avoid breaking virtualfields when partial select
                    pass
        return rowsobj
2123
    def common_filter(self, query, tablenames):
        """AND *query* with each table's common filter and tenant filter.

        May return a new query even when *query* is None (tenant-only
        filtering).
        """
        tenant_fieldname = self.db._request_tenant

        for tablename in tablenames:
            table = self.db[tablename]

            # deal with user provided filters
            if table._common_filter != None:
                query = query & table._common_filter(query)

            # deal with multi_tenant filters
            if tenant_fieldname in table:
                default = table[tenant_fieldname].default
                if not default is None:
                    newquery = table[tenant_fieldname] == default
                    if query is None:
                        query = newquery
                    else:
                        query = query & newquery
        return query
2144
    def CASE(self, query, t, f):
        """Build a SQL ``CASE WHEN <query> THEN <t> ELSE <f> END`` Expression.

        *t* and *f* may be Expressions or plain Python values; plain values
        are represented with a type inferred from their Python type
        (bool/int/float, defaulting to string).
        """
        def represent(x):
            types = {type(True): 'boolean', type(0): 'integer', type(1.0): 'double'}
            if x is None: return 'NULL'
            elif isinstance(x, Expression): return str(x)
            else: return self.represent(x, types.get(type(x), 'string'))
        return Expression(self.db, 'CASE WHEN %s THEN %s ELSE %s END' % \
            (self.expand(query), represent(t), represent(f)))
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################


class SQLiteAdapter(BaseAdapter):
    """Adapter for SQLite databases ('sqlite://...' URIs)."""

    drivers = ('sqlite2', 'sqlite3')

    can_select_for_update = None  # support ourselves with BEGIN TRANSACTION

    def EXTRACT(self, field, what):
        # delegates to the web2py_extract UDF registered in after_connection
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        """SQLite UDF: extract a date/time component from an ISO string.

        Returns None on any malformed input; 'epoch' returns a Unix
        timestamp for 'YYYY-MM-DD HH:MM:SS' strings.
        """
        # fixed character positions of each component in ISO format
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
        }
        try:
            if lookup != 'epoch':
                (i, j) = table[lookup]
                return int(s[i:j])
            else:
                return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
        except:
            return None

    @staticmethod
    def web2py_regexp(expression, item):
        """SQLite UDF backing the REGEXP operator."""
        return re.compile(expression).search(item) is not None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # NOTE: driver_args/adapter_args use mutable defaults and
        # driver_args is mutated below — shared across calls by design?
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = 0  # sqlite connections are not pooled
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://', 1)[1]
            if dbpath[0] != '/':
                if PYTHON_VERSION == 2:
                    dbpath = pjoin(
                        self.folder.decode(path_encoding).encode('utf8'), dbpath)
                else:
                    dbpath = pjoin(self.folder, dbpath)
        if not 'check_same_thread' in driver_args:
            # allow the pooled connection to be used from other threads
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # register the UDFs used by EXTRACT and REGEXP
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    def _truncate(self, table, mode=''):
        # also reset the AUTOINCREMENT counter kept in sqlite_sequence
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self, table):
        return self.cursor.lastrowid

    def REGEXP(self, first, second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second, 'string'))

    def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        if attributes.get('for_update', False) and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(SQLiteAdapter, self).select(query, fields, attributes)
class SpatiaLiteAdapter(SQLiteAdapter):
    """SQLite adapter with the SpatiaLite GIS extension loaded."""

    drivers = ('sqlite3', 'sqlite2')

    types = copy.copy(BaseAdapter.types)
    types.update(geometry='GEOMETRY')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
        self.db = db
        self.dbengine = "spatialite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        self.srid = srid  # default spatial reference id for this connection
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('spatialite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://', 1)[1]
            if dbpath[0] != '/':
                dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.enable_load_extension(True)
        # for Windows, rename libspatialite-2.dll to libspatialite.dll
        # Linux uses libspatialite.so
        # Mac OS X uses libspatialite.dylib
        libspatialite = SPATIALLIBS[platform.system()]
        self.execute(r'SELECT load_extension("%s");' % libspatialite)

        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        return 'AsGeoJSON(%s,%s,%s)' % (self.expand(first),
                                        second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        return 'AsText(%s)' % (self.expand(first))

    def ST_CONTAINS(self, first, second):
        return 'Contains(%s,%s)' % (self.expand(first),
                                    self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return 'Distance(%s,%s)' % (self.expand(first),
                                    self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return 'Equals(%s,%s)' % (self.expand(first),
                                  self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return 'Intersects(%s,%s)' % (self.expand(first),
                                      self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return 'Overlaps(%s,%s)' % (self.expand(first),
                                    self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        return 'Simplify(%s,%s)' % (self.expand(first),
                                    self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        return 'Touches(%s,%s)' % (self.expand(first),
                                   self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return 'Within(%s,%s)' % (self.expand(first),
                                  self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render geometry values as ST_GeomFromText literals; defer to
        BaseAdapter for all non-geo field types."""
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326  # Spatialite default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            # if field_is_type('geometry'):
            value = "ST_GeomFromText('%s',%s)" % (obj, srid)
            # elif field_is_type('geography'):
            #     value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
            # else:
            #     raise SyntaxError, 'Invalid field type %s' %fieldtype
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
class JDBCSQLiteAdapter(SQLiteAdapter):
    """SQLite adapter for Jython via the zxJDBC bridge."""

    drivers = ('zxJDBC_sqlite',)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://', 1)[1]
            if dbpath[0] != '/':
                dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), dbpath)
        def connector(dbpath=dbpath, driver_args=driver_args):
            return self.driver.connect(
                self.driver.getConnection('jdbc:sqlite:' + dbpath),
                **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)

    def execute(self, a):
        return self.log_execute(a)
class MySQLAdapter(BaseAdapter):
    """Adapter for MySQL ('mysql://user:pass@host:port/db' URIs)."""

    drivers = ('MySQLdb', 'pymysql')

    maxcharlength = 255
    commit_on_alter_table = True
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'json': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
        'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
    }

    def varquote(self, name):
        # MySQL quotes identifiers with backticks
        return varquote_aux(name, '`%s`')

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self, field, parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])

    def EPOCH(self, first):
        return "UNIX_TIMESTAMP(%s)" % self.expand(first)

    def CONCAT(self, *items):
        return 'CONCAT(%s)' % ','.join(self.expand(x, 'string') for x in items)

    def REGEXP(self, first, second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second, 'string'))

    def _drop(self, table, mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;', 'DROP TABLE %s;' % table,
                'SET FOREIGN_KEY_CHECKS=1;']

    def _insert_empty(self, table):
        return 'INSERT INTO %s VALUES (DEFAULT);' % table

    def distributed_transaction_begin(self, key):
        self.execute('XA START;')

    def prepare(self, key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    # FIX: parameter was misspelled 'ley'; renamed 'key' for consistency
    # with prepare()/rollback_prepared() (callers pass it positionally)
    def commit_prepared(self, key):
        self.execute("XA COMMIT;")

    def rollback_prepared(self, key):
        self.execute("XA ROLLBACK;")

    # raw string so the \: and \w escapes are literal regex syntax
    REGEX_URI = re.compile(r'^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the connection URI and set up the driver connector."""
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        driver_args.update(db=db,
                           user=credential_decoder(user),
                           passwd=credential_decoder(password),
                           host=host,
                           port=port,
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self, table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])

    def integrity_error_class(self):
        return self.cursor.IntegrityError
class PostgreSQLAdapter(BaseAdapter):
    """Adapter for PostgreSQL, including PostGIS geometry/geography types."""

    drivers = ('psycopg2', 'pg8000')

    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
    }

    def varquote(self, name):
        # PostgreSQL quotes identifiers with double quotes
        return varquote_aux(name, '"%s"')

    def adapt(self, obj):
        """Quote/escape a Python value as a SQL literal, per driver."""
        if self.driver_name == 'psycopg2':
            return psycopg2_adapt(obj).getquoted()
        elif self.driver_name == 'pg8000':
            return "'%s'" % str(obj).replace("%", "%%").replace("'", "''")
        else:
            return "'%s'" % str(obj).replace("'", "''")

    def sequence_name(self, table):
        return '%s_id_Seq' % table

    def RANDOM(self):
        return 'RANDOM()'

    def ADD(self, first, second):
        # '+' on text types means concatenation ('||') in PostgreSQL
        t = first.type
        if t in ('text', 'string', 'password', 'json', 'upload', 'blob'):
            return '(%s || %s)' % (self.expand(first), self.expand(second, t))
        else:
            return '(%s + %s)' % (self.expand(first), self.expand(second, t))

    def distributed_transaction_begin(self, key):
        return

    def prepare(self, key):
        self.execute("PREPARE TRANSACTION '%s';" % key)

    def commit_prepared(self, key):
        self.execute("COMMIT PREPARED '%s';" % key)

    def rollback_prepared(self, key):
        self.execute("ROLLBACK PREPARED '%s';" % key)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        #              % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse the connection URI into a libpq-style connection string."""
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid  # default spatial reference id for geometry fields
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                % (db, user, host, port, password)
        # choose diver according uri
        if self.driver:
            self.__version__ = "%s %s" % (self.driver.__name__,
                                          self.driver.__version__)
        else:
            self.__version__ = None
        def connector(msg=msg, driver_args=driver_args):
            return self.driver.connect(msg, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.set_client_encoding('UTF8')
        self.execute("SET standard_conforming_strings=on;")
        self.try_json()

    def lastrowid(self, table):
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

    def try_json(self):
        # check JSON data type support
        # (to be added to after_connection)
        # NOTE(review): pg8000/zxJDBC compare version *strings* — lexical
        # comparison misorders e.g. '10.0' vs '9.2.0'; confirm for PG >= 10
        if self.driver_name == "pg8000":
            supports_json = self.connection.server_version >= "9.2.0"
        elif (self.driver_name == "psycopg2") and \
                (self.driver.__version__ >= "2.0.12"):
            supports_json = self.connection.server_version >= 90200
        elif self.driver_name == "zxJDBC":
            supports_json = self.connection.dbversion >= "9.2.0"
        else: supports_json = None
        if supports_json: self.types["json"] = "JSON"
        else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")

    def LIKE(self, first, second):
        # non-text columns are CAST to CHAR before matching
        args = (self.expand(first), self.expand(second, 'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s LIKE %s)' % args

    def ILIKE(self, first, second):
        args = (self.expand(first), self.expand(second, 'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s ILIKE %s)' % args

    def REGEXP(self, first, second):
        return '(%s ~ %s)' % (self.expand(first),
                              self.expand(second, 'string'))

    def STARTSWITH(self, first, second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand(second + '%', 'string'))

    def ENDSWITH(self, first, second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand('%' + second, 'string'))

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        """
        http://postgis.org/docs/ST_AsGeoJSON.html
        """
        return 'ST_AsGeoJSON(%s,%s,%s,%s)' % (second['version'],
                                              self.expand(first), second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        """
        http://postgis.org/docs/ST_AsText.html
        """
        return 'ST_AsText(%s)' % (self.expand(first))

    def ST_X(self, first):
        """
        http://postgis.org/docs/ST_X.html
        """
        return 'ST_X(%s)' % (self.expand(first))

    def ST_Y(self, first):
        """
        http://postgis.org/docs/ST_Y.html
        """
        return 'ST_Y(%s)' % (self.expand(first))

    def ST_CONTAINS(self, first, second):
        """
        http://postgis.org/docs/ST_Contains.html
        """
        return 'ST_Contains(%s,%s)' % (self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        """
        http://postgis.org/docs/ST_Distance.html
        """
        return 'ST_Distance(%s,%s)' % (self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        """
        http://postgis.org/docs/ST_Equals.html
        """
        return 'ST_Equals(%s,%s)' % (self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        """
        http://postgis.org/docs/ST_Intersects.html
        """
        return 'ST_Intersects(%s,%s)' % (self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        """
        http://postgis.org/docs/ST_Overlaps.html
        """
        return 'ST_Overlaps(%s,%s)' % (self.expand(first), self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        """
        http://postgis.org/docs/ST_Simplify.html
        """
        return 'ST_Simplify(%s,%s)' % (self.expand(first), self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        """
        http://postgis.org/docs/ST_Touches.html
        """
        return 'ST_Touches(%s,%s)' % (self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        """
        http://postgis.org/docs/ST_Within.html
        """
        return 'ST_Within(%s,%s)' % (self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render geometry/geography values as PostGIS literals; defer to
        BaseAdapter for all non-geo field types."""
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326  # postGIS default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            if field_is_type('geometry'):
                value = "ST_GeomFromText('%s',%s)" % (obj, srid)
            elif field_is_type('geography'):
                value = "ST_GeogFromText('SRID=%s;%s')" % (srid, obj)
            # else:
            #     raise SyntaxError('Invalid field type %s' %fieldtype)
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
class NewPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter that stores list: types as native arrays
    (BIGINT[]/TEXT[]) instead of bar-encoded TEXT."""

    drivers = ('psycopg2', 'pg8000')

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BIGINT[]',
        'list:string': 'TEXT[]',
        'list:reference': 'BIGINT[]',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
    }

    # native arrays come back already decoded, so no bar-decoding here

    def parse_list_integers(self, value, field_type):
        return value

    def parse_list_references(self, value, field_type):
        return [self.parse_reference(r, field_type[5:]) for r in value]

    def parse_list_strings(self, value, field_type):
        return value

    def represent(self, obj, fieldtype):
        """Render list: values as ARRAY[...] literals; defer to BaseAdapter
        otherwise."""
        field_is_type = fieldtype.startswith
        if field_is_type('list:'):
            if not obj:
                obj = []
            elif not isinstance(obj, (list, tuple)):
                obj = [obj]
            if field_is_type('list:string'):
                obj = map(str, obj)
            else:
                obj = map(int, obj)
            return 'ARRAY[%s]' % ','.join(repr(item) for item in obj)
        return BaseAdapter.represent(self, obj, fieldtype)
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter for Jython via the zxJDBC bridge."""

    drivers = ('zxJDBC',)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        # (jdbc url, user, password) triple passed to zxJDBC connect
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connector(msg=msg, driver_args=driver_args):
            return self.driver.connect(*msg, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.set_client_encoding('UTF8')
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
        self.try_json()
class OracleAdapter(BaseAdapter):
    """Adapter for Oracle via cx_Oracle.

    Oracle peculiarities handled here:
    - no autoincrement: each table gets a sequence plus a BEFORE INSERT
      trigger (create_sequence_and_triggers);
    - CLOB literals cannot be inlined: represent_exceptions() emits
      ":CLOB('...')" placeholders which execute() rewrites into numbered
      bind variables;
    - pagination uses nested ROWNUM subqueries (select_limitby).
    """
    drivers = ('cx_Oracle',)

    commit_on_alter_table = False
    # Mapping of DAL field types to Oracle column type DDL.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'bigint': 'NUMBER',
        'float': 'FLOAT',
        'double': 'BINARY_DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'NUMBER PRIMARY KEY',
        'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        # Name of the per-table sequence that backs the id column.
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        # Name of the per-table BEFORE INSERT trigger.
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        # Oracle's random-value function, usable in ORDER BY.
        return 'dbms_random.value'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def _drop(self,table,mode):
        # Drop the table together with its id-generating sequence.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Pre-12c Oracle pagination: inner query caps rows with
        # ROWNUM <= lmax, outer query skips rows with w_row > lmin.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        # Oracle limits identifiers to 30 characters; shorten when needed.
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        """Type-specific literal rendering the generic represent() can't do.

        Returns None when no special handling applies.
        """
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            # Placeholder syntax; execute() turns it into a bind variable.
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Store connection settings and optionally connect.

        The part of the URI after "oracle://" is passed verbatim to
        cx_Oracle.connect().
        """
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        # threaded=True makes the OCI connection usable from multiple threads.
        if not 'threaded' in driver_args:
            driver_args['threaded']=True
        def connector(uri=ruri,driver_args=driver_args):
            return self.driver.connect(uri,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Pin session date formats so literal parsing matches represent().
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    # Matches the first ":CLOB('...')" placeholder that is not inside a
    # quoted string; used by execute() below.
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command, args=None):
        """Execute SQL after rewriting CLOB placeholders into bind variables.

        Each ":CLOB('...')" in the command is replaced by a numbered bind
        position (:1, :2, ...) and its payload appended to args, since
        Oracle cannot accept large CLOB values inline. A trailing ';' is
        stripped because cx_Oracle rejects it.
        """
        args = args or []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            # Strip the CLOB('...') wrapper and undo the '' quote escaping.
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command, args)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table plus its id sequence and BEFORE INSERT trigger.

        The trigger honours explicitly supplied ids by re-aligning the
        sequence, otherwise it assigns the next sequence value.
        """
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
        self.execute("""
CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
DECLARE
    curr_val NUMBER;
    diff_val NUMBER;
    PRAGMA autonomous_transaction;
BEGIN
    IF :NEW.id IS NOT NULL THEN
        EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
        diff_val := :NEW.id - curr_val - 1;
        IF diff_val != 0 THEN
          EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
          EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
          EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
        END IF;
    END IF;
    SELECT %(sequence_name)s.nextval INTO :NEW.id FROM DUAL;
END;
""" % dict(trigger_name=trigger_name, tablename=tablename, sequence_name=sequence_name))

    def lastrowid(self,table):
        # The trigger assigned the id from the sequence; read currval back.
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return long(self.cursor.fetchone()[0])

    #def parse_value(self, value, field_type, blob_decode=True):
    #    if blob_decode and isinstance(value, cx_Oracle.LOB):
    #        try:
    #            value = value.read()
    #        except self.driver.ProgrammingError:
    #            # After a subsequent fetch the LOB value is not valid anymore
    #            pass
    #    return BaseAdapter.parse_value(self, value, field_type, blob_decode)

    def _fetchall(self):
        # cx_Oracle LOB handles go stale after the next fetch, so when the
        # result set contains CLOB columns read them eagerly into strings.
        if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description):
            return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
                               for c in r]) for r in self.cursor]
        else:
            return self.cursor.fetchall()
3069
class MSSQLAdapter(BaseAdapter):
    """Adapter for Microsoft SQL Server via pyodbc.

    Pagination uses TOP only (offsets are applied client-side via
    rowslice); spatial types are supported through the geometry/geography
    CLR methods (ST*).
    """
    drivers = ('pyodbc',)
    T_SEP = 'T'  # separator between date and time in datetime literals

    # Mapping of DAL field types to MSSQL column type DDL.
    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def concat_add(self,tablename):
        # MSSQL cannot chain ADD COLUMN clauses; start a new ALTER TABLE.
        return '; ALTER TABLE %s ADD ' % tablename

    def varquote(self,name):
        # MSSQL quotes identifiers with square brackets.
        return varquote_aux(name,'[%s]')

    def EXTRACT(self,field,what):
        return "DATEPART(%s,%s)" % (what, self.expand(field))

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'NEWID()'

    def ALLOW_NULL(self):
        return ' NULL'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key

    def AGGREGATE(self, first, what):
        # MSSQL spells LENGTH as LEN.
        if what == 'LENGTH':
            what = 'LEN'
        return "%s(%s)" % (what, self.expand(first))


    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Only TOP is available here (no OFFSET): limitby=(lmin,lmax) keeps
        # the first lmax rows; rowslice() later discards the leading lmin.
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
            if 'GROUP BY' in sql_o:
                # TOP with GROUP BY: drop the trailing ORDER BY clause.
                orderfound = sql_o.find('ORDER BY ')
                if orderfound >= 0:
                    sql_o = sql_o[:orderfound]
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    TRUE = 1
    FALSE = 0

    # Whole remainder of the URI is a DSN when no '@' is present.
    REGEX_DSN = re.compile('^(?P<dsn>.+)$')
    # user[:password]@host[:port]/dbname[?arg1=v1&arg2=v2...]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
    REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse the DAL URI into a pyodbc connection string and connect.

        Two URI forms are accepted: a raw ODBC DSN (no '@'), or
        user:password@host:port/db?odbc_args.
        """
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid  # default spatial reference id for GIS fields
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # DSN form: everything after the scheme is the ODBC DSN.
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
            # was cnxn = 'DSN=%s' % dsn
            cnxn = dsn
        else:
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'  # default MSSQL port
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = { 'DRIVER':'{SQL Server}' }
            urlargs = m.group('urlargs') or ''
            for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connector(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        # SCOPE_IDENTITY() is safer than @@IDENTITY (ignores trigger inserts).
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return long(self.cursor.fetchone()[0])

    def integrity_error_class(self):
        return pyodbc.IntegrityError

    def rowslice(self,rows,minimum=0,maximum=None):
        # Client-side offset: select_limitby only emitted TOP lmax.
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def CONCAT(self, *items):
        # MSSQL concatenates strings with '+'.
        return '(%s)' % ' + '.join(self.expand(x,'string') for x in items)

    # GIS Spatial Extensions

    # No STAsGeoJSON in MSSQL

    def ST_ASTEXT(self, first):
        return '%s.STAsText()' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))

    # no STSimplify in MSSQL

    def ST_TOUCHES(self, first, second):
        return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render ``obj`` as SQL; GIS field types use STGeomFromText."""
        field_is_type = fieldtype.startswith
        if field_is_type('geometry'):
            srid = 0 # MS SQL default srid for geometry
            # fieldtype looks like "geometry(TYPE,SRID)"; take what follows '('.
            geotype, parms = fieldtype[:-1].split('(')
            if parms:
                srid = parms
            return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
        elif fieldtype == 'geography':
            srid = 4326 # MS SQL default srid for geography
            geotype, parms = fieldtype[:-1].split('(')
            if parms:
                srid = parms
                return "geography::STGeomFromText('%s',%s)" %(obj, srid)
#            else:
#                raise SyntaxError('Invalid field type %s' %fieldtype)
            # NOTE(review): falls back to geometry:: for a bare 'geography'
            # fieldtype -- looks intentional per the commented-out else above,
            # but verify; fieldtype[:-1].split('(') on 'geography' yields a
            # single element and would raise ValueError before reaching here.
            return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
        return BaseAdapter.represent(self, obj, fieldtype)
3281
class MSSQL3Adapter(MSSQLAdapter):
    """ experimental support for pagination in MSSQL"""
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Server-side offset emulation: MSSQL < 2012 has no OFFSET/FETCH,
        # so wrap the query in ROW_NUMBER() OVER (ORDER BY ...) and filter
        # on the computed row number.
        if limitby:
            (lmin, lmax) = limitby
            if lmin == 0:
                # No offset requested: plain TOP is sufficient.
                sql_s += ' TOP %i' % lmax
                return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
            lmin += 1  # ROW_NUMBER() is 1-based and BETWEEN is inclusive
            # Split sql_o into its GROUP BY prefix and ORDER BY suffix.
            sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:]
            sql_g_inner = sql_o[:sql_o.find('ORDER BY ')]
            # Alias each selected column as f_0, f_1, ... so the outer query
            # can reference them without repeating the inner expressions.
            sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))]
            sql_f_inner = [f for f in sql_f.split(',')]
            sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)]
            sql_f_iproxy = ', '.join(sql_f_iproxy)
            sql_f_oproxy = ', '.join(sql_f_outer)
            return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)
    def rowslice(self,rows,minimum=0,maximum=None):
        # Pagination already happened in SQL; return the rows untouched.
        return rows
3303
class MSSQL2Adapter(MSSQLAdapter):
    """MSSQL adapter using national (unicode) character types.

    Identical to MSSQLAdapter except strings map to NVARCHAR/NTEXT and
    string literals are emitted with the N'' prefix.
    """
    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'json': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def represent(self, obj, fieldtype):
        value = BaseAdapter.represent(self, obj, fieldtype)
        # Prefix quoted string literals with N so MSSQL reads them as NVARCHAR.
        if fieldtype in ('string','text', 'json') and value[:1]=="'":
            value = 'N'+value
        return value

    def execute(self,a):
        # Statements arrive as utf8-encoded bytes (Python 2); decode to
        # unicode before handing them to the driver.
        return self.log_execute(a.decode('utf8'))
3343
class VerticaAdapter(MSSQLAdapter):
    """HP Vertica adapter.

    Inherits the MSSQL dialect but uses LIMIT/OFFSET pagination and
    Postgres-style TRUNCATE.
    """
    drivers = ('pyodbc',)
    T_SEP = ' '  # Vertica uses a space between date and time in literals

    types = {
        'boolean': 'BOOLEAN',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BYTEA',
        'json': 'VARCHAR(%(length)s)',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'IDENTITY',
        'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BYTEA',
        'list:string': 'BYTEA',
        'list:reference': 'BYTEA',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }


    def EXTRACT(self, first, what):
        return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first))

    def _truncate(self, table, mode=''):
        tablename = table._tablename
        return ['TRUNCATE %s %s;' % (tablename, mode or '')]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Native LIMIT/OFFSET pagination (unlike the MSSQL parent's TOP).
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)

    def lastrowid(self,table):
        self.execute('SELECT LAST_INSERT_ID();')
        return long(self.cursor.fetchone()[0])

    def execute(self, a):
        # No utf8 re-decoding needed here; pass the statement through.
        return self.log_execute(a)
3393
class SybaseAdapter(MSSQLAdapter):
    """Sybase adapter; reuses the MSSQL SQL dialect with Sybase types.

    Connects through the `Sybase` driver using a
    'sybase:host=HOST:PORT;dbname=DB' DSN, or a raw DSN when the URI
    contains no '@'.
    """
    drivers = ('Sybase',)

    types = {
        'boolean': 'BIT',
        'string': 'CHAR VARYING(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR VARYING(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'CHAR VARYING(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }


    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse the DAL URI, build the Sybase DSN and optionally connect.

        Raises SyntaxError when the URI, user, host or database name is
        missing or malformed.
        """
        self.db = db
        self.dbengine = "sybase"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # BUG FIX: copy driver_args before update() below so the shared,
        # mutable default dict is not mutated across adapter instances.
        driver_args = dict(driver_args)
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # Raw-DSN form: everything after the scheme is the DSN.
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
        else:
            # BUG FIX: match the scheme-stripped ruri (as every other adapter
            # does), not the full uri -- matching the full uri made "sybase"
            # parse as the user name and garbled the real credentials.
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'

            dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db)

            driver_args.update(user = credential_decoder(user),
                               password = credential_decoder(password))

        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def integrity_error_class(self):
        return RuntimeError # FIX THIS
3487
class FireBirdAdapter(BaseAdapter):
    """Adapter for Firebird.

    Firebird has no autoincrement column: each table gets a generator
    (sequence) plus a BEFORE INSERT trigger (create_sequence_and_triggers).
    """
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    commit_on_alter_table = False
    support_distributed_transaction = True
    # Mapping of DAL field types to Firebird column type DDL.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        # Name of the per-table generator backing the id column.
        return 'genid_%s' % tablename

    def trigger_name(self,tablename):
        return 'trg_id_%s' % tablename

    def RANDOM(self):
        return 'RAND()'

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])

    def LENGTH(self, first):
        return "CHAR_LENGTH(%s)" % self.expand(first)

    def CONTAINS(self,first,second,case_sensitive=False):
        # list:* values are stored as |item|item| strings: wrap the needle
        # in '|' (escaping embedded '|' as '||') before CONTAINING.
        if first.type.startswith('list:'):
            second = Expression(None,self.CONCAT('|',Expression(
                        None,self.REPLACE(second,('|','||'))),'|'))
        return '(%s CONTAINING %s)' % (self.expand(first),
                                       self.expand(second, 'string'))

    def _drop(self,table,mode):
        # Drop the table together with its id generator.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Firebird pagination uses FIRST <count> SKIP <offset>.
        if limitby:
            (lmin, lmax) = limitby
            sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self,table,mode = ''):
        # No TRUNCATE in Firebird: delete all rows and reset the generator.
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]

    # user[:password]@host[:port]/db[?set_encoding=CHARSET]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the DAL URI into driver keyword arguments and connect.

        Raises SyntaxError when the URI, user, host or database name is
        missing or malformed.
        """
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        port = int(m.group('port') or 3050)  # default Firebird port
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        charset = m.group('charset') or 'UTF8'
        # NOTE(review): this update() mutates the *shared* mutable default
        # dict when driver_args is not passed explicitly, leaking settings
        # across adapter instances -- consider copying it first; confirm
        # before changing, callers may rely on the current behaviour.
        driver_args.update(dsn='%s/%s:%s' % (host,port,db),
                           user = credential_decoder(user),
                           password = credential_decoder(password),
                           charset = charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        # Create the table, its id generator, and a trigger that assigns
        # gen_id() to new rows whose id is NULL.
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        # gen_id(seq, 0) reads the generator's current value without bumping it.
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return long(self.cursor.fetchone()[0])
3616
class FireBirdEmbeddedAdapter(FireBirdAdapter):
    """Firebird Embedded adapter: connects to a database file path
    instead of a host/port."""
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    # user[:password]@/path/to/db[?set_encoding=CHARSET]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the embedded-Firebird URI (file path form) and connect.

        Raises SyntaxError when the URI, user or database path is missing
        or malformed.
        """
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError('Path required')
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        host = ''  # embedded engine: no network host
        # NOTE(review): mutates the shared mutable default driver_args dict
        # when the argument is not supplied (same pattern as the parent
        # class) -- consider copying before update(); confirm first.
        driver_args.update(host=host,
                           database=pathdb,
                           user=credential_decoder(user),
                           password=credential_decoder(password),
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3663
class InformixAdapter(BaseAdapter):
    """Adapter for IBM Informix (9+) via informixdb."""
    drivers = ('informixdb',)

    # Mapping of DAL field types to Informix column type DDL.
    # NOTE(review): the BLOB SUB_TYPE entries look inherited from the
    # Firebird mapping -- verify they are valid Informix DDL.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGSERIAL',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        }

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Informix pagination uses SKIP (10.0+) and FIRST (9.0+) prefixes;
        # availability is checked against the live server's version.
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        # Date/datetime literals need to_date(); returns None when the
        # generic represent() should handle the value.
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    # user[:password]@host[:port]/dbname
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the DAL URI into an Informix 'db@host' DSN and connect.

        Raises SyntaxError when the URI, user, host or database name is
        missing or malformed.
        """
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        user = credential_decoder(user)
        password = credential_decoder(password)
        dsn = '%s@%s' % (db,host)
        driver_args.update(user=user,password=password,autocommit=True)
        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # informixdb rejects a trailing semicolon; strip it before running.
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        # sqlerrd[1] holds the SERIAL value generated by the last insert.
        return self.cursor.sqlerrd[1]

    def integrity_error_class(self):
        return informixdb.IntegrityError
3780
class InformixSEAdapter(InformixAdapter):
    """Adapter for Informix Standard Engine (work in progress).

    SE understands neither SKIP nor FIRST, so limitby is ignored in the
    generated SQL and emulated client-side by slicing fetched rows.
    """

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # No SQL-level paging on SE: always emit a plain SELECT.
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def rowslice(self, rows, minimum=0, maximum=None):
        # Apply limitby in Python after the full fetch.
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
3792
class DB2Adapter(BaseAdapter):
    """DAL adapter for IBM DB2 connected through pyodbc.

    The part of the uri after 'db2://' is handed to pyodbc verbatim as
    an ODBC connection string.
    """
    drivers = ('pyodbc',)

    # DAL field type -> DB2 column/DDL fragment.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def LEFT_JOIN(self):
        # DB2 spells it LEFT OUTER JOIN.
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Append FETCH FIRST for limitby; the lower bound is applied
        later in rowslice() since this clause cannot skip rows."""
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Blob and datetime literals need DB2-specific encodings;
        return None for other types (generic handling applies)."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return "BLOB('%s')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                # DB2 timestamp literal: YYYY-MM-DD-HH.MM.SS
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Store settings and build a connector closure around the raw
        ODBC connection string (everything after 'db2://')."""
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # DB2/ODBC rejects a trailing semicolon; strip it before running.
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        """Fetch the last identity value generated for *table*."""
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return long(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        # Client-side emulation of the limitby lower bound.
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
3878
class TeradataAdapter(BaseAdapter):
    """DAL adapter for Teradata, connected through pyodbc.

    The part of the uri after 'teradata://' is handed to pyodbc verbatim
    as an ODBC connection string.
    """
    drivers = ('pyodbc',)

    # DAL field type -> Teradata column/DDL fragment.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # Modified Constraint syntax for Teradata.
        # Teradata does not support ON DELETE.
        'id': 'INT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'reference': 'INT',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'big-reference': 'BIGINT',
        'reference FK': ' REFERENCES %(foreign_key)s',
        'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Store settings and build a connector closure around the raw
        ODBC connection string (everything after the scheme)."""
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    # Similar to MSSQL, Teradata can't specify a range (for Pageby)
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Only the upper bound is expressible (TOP); the lower bound is
        # not applied here.
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self, table, mode=''):
        """Teradata empties a table with DELETE ... ALL (no TRUNCATE)."""
        tablename = table._tablename
        return ['DELETE FROM %s ALL;' % (tablename)]
3943 3944 INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
                                     # (ANSI-SQL wants this form of name
                                     # to be a delimited identifier)

class IngresAdapter(BaseAdapter):
    """DAL adapter for Ingres, connected through pyodbc.

    Auto-increment ids are implemented with a per-table sequence (see
    INGRES_SEQNAME / create_sequence_and_triggers) rather than identity
    columns.
    """
    drivers = ('pyodbc',)

    # DAL field type -> Ingres column/DDL fragment.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        # the placeholder sequence name is swapped for the real one in
        # create_sequence_and_triggers()
        'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Map limitby onto Ingres FIRST n / OFFSET m clauses."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Derive an ODBC connection string from the uri and prepare a
        connector closure; connects immediately when do_connect."""
        self.db = db
        self.dbengine = "ingres"
        # NOTE(review): relies on a module-global `pyodbc` name instead of
        # the find_driver() machinery used by the other adapters — confirm
        # it is imported at module level.
        self._driver = pyodbc
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        connstr = uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        if '=' in connstr:
            # Assume we have a regular ODBC connection string and just use it
            ruri = connstr
        else:
            # Assume only (local) dbname is passed in with OS auth
            database_name = connstr
            default_driver_name = 'Ingres'
            vnode = '(local)'
            servertype = 'ingres'  # NOTE(review): assigned but never used
            ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)

        self.connector = connector

        # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        """Run the CREATE TABLE and set up id sequence / btree storage.

        With an explicit _primarykey only the storage structure is
        modified; otherwise a per-table sequence replaces the
        INGRES_SEQNAME placeholder before the DDL runs.
        """
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        """Read back the current value of the table's id sequence."""
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return long(self.cursor.fetchone()[0]) # don't really need int type cast here...

    def integrity_error_class(self):
        return self._driver.IntegrityError
4055
class IngresUnicodeAdapter(IngresAdapter):
    """Ingres adapter using Unicode column types (NVARCHAR/NCLOB).

    Behaves exactly like IngresAdapter except for the type map below.
    """

    drivers = ('pyodbc',)

    # Same layout as IngresAdapter.types, with national character types
    # for string-like fields.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'json': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }
4087
class SAPDBAdapter(BaseAdapter):
    """DAL adapter for SAP DB / MaxDB (experimental), via the sapdb driver."""
    drivers = ('sapdb',)

    support_distributed_transaction = False
    # DAL field type -> SAP DB column/DDL fragment.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'json': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # ids are fed from a per-table sequence wired up in
        # create_sequence_and_triggers()
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,table):
        """Name of the id sequence backing *table*."""
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Emulate limitby with a nested ROWNO sub-select.

        NOTE(review): the inner filter is `WHERE ROWNO=%i` with lmax —
        an equality, not `<=`; verify against a live SAP DB before
        relying on ranged queries.
        """
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the id sequence, default the id column to it, then run
        the CREATE TABLE statement."""
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
                         % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    # sapdb://user:password@host[:port]/db[?sslmode=...]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the sapdb:// uri and prepare a connector closure."""
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        def connector(user=user, password=password, database=db,
                      host=host, driver_args=driver_args):
            # defaults freeze the parsed credentials for reconnects
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        """Advance and return the table's id sequence value."""
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return long(self.cursor.fetchone()[0])
4178
class CubridAdapter(MySQLAdapter):
    """DAL adapter for CUBRID (experimental), via the cubriddb driver.

    URI form: cubrid://user:password@host[:port]/db[?set_encoding=charset]
    SQL generation is inherited from MySQLAdapter.
    """
    drivers = ('cubriddb',)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the cubrid:// uri and prepare a connector closure.

        Raises SyntaxError when the uri is malformed or misses a
        mandatory component (user, host, database name).
        """
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        # 30000 is the CUBRID broker's default port
        port = int(m.group('port') or '30000')
        # FIX: dropped two dead locals from the original —
        #   charset = m.group('charset') or 'utf8'   (never used)
        #   passwd = credential_decoder(password)    (never used; the
        #   connector below closes over `password`)
        # NOTE(review): user goes through credential_decoder twice, as in
        # the other adapters — confirm intent for non-identity decoders.
        user = credential_decoder(user)
        def connector(host=host, port=port, db=db,
                      user=user, passwd=password, driver_args=driver_args):
            return self.driver.connect(host, port, db, user, passwd, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Post-connect session setup (MySQL-compatible semantics)."""
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4226
######## GAE MySQL ##########

class DatabaseStoredFile:
    """File-like object whose contents live in a `web2py_filesystem`
    database table instead of on disk.

    Used where no writable filesystem exists (e.g. Google SQL) to store
    the DAL's .table migration metadata. Only MySQL and Postgres
    backends are supported.

    NOTE(review): all queries below interpolate `filename` (and the file
    content) directly into SQL strings; the unused escape() helper is
    never applied. Filenames come from the framework, not end users, but
    this is worth auditing.
    """

    # Class-level flag: becomes True once the web2py_filesystem table has
    # been created (shared by all instances).
    web2py_filesystem = False

    def escape(self,obj):
        # Delegate escaping to the adapter (currently unused — see class note).
        return self.db._adapter.escape(obj)

    def __init__(self,db,filename,mode):
        """Open *filename* in *mode* ('r', 'w', 'rw' or 'a').

        Creates the backing table on first use, then loads existing
        content from the table (or, as a fallback, from a real file on
        disk). Raises RuntimeError for unsupported engines or a missing
        file opened for reading.
        """
        if not db._adapter.dbengine in ('mysql', 'postgres'):
            raise RuntimeError("only MySQL/Postgres can store metadata .table files in database for now")
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            if db._adapter.dbengine == 'mysql':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
            elif db._adapter.dbengine == 'postgres':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
            self.db.executesql(sql)
            DatabaseStoredFile.web2py_filesystem = True
        self.p=0  # current read position within self.data
        self.data = ''
        if mode in ('r','rw','a'):
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
                % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif exists(filename):
                # fall back to a real file on disk if one exists
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r','rw'):
                raise RuntimeError("File %s does not exist" % filename)

    def read(self, bytes):
        """Return up to *bytes* characters from the current position."""
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        """Return the next line (including its newline, when present)."""
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            # no newline left: return the remainder
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self,data):
        # Appends only; the buffer is persisted in close_connection().
        self.data += data

    def close_connection(self):
        """Persist the buffer: delete any previous row for this path,
        insert the new content, commit, and detach from the db."""
        if self.db is not None:
            self.db.executesql(
                "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
            query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
                % (self.filename, self.data.replace("'","''"))
            self.db.executesql(query)
            self.db.commit()
            self.db = None

    def close(self):
        self.close_connection()

    @staticmethod
    def exists(db, filename):
        """True when *filename* exists on disk or in web2py_filesystem."""
        if exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        if db.executesql(query):
            return True
        return False
4304
class UseDatabaseStoredFile:
    """Mixin redirecting an adapter's file operations to DatabaseStoredFile.

    Mixed into adapters (e.g. GoogleSQLAdapter) that must keep .table
    migration metadata inside the database rather than on a filesystem.
    """

    def file_exists(self, filename):
        """True when *filename* exists on disk or in web2py_filesystem."""
        return DatabaseStoredFile.exists(self.db, filename)

    def file_open(self, filename, mode='rb', lock=True):
        """Open a database-backed pseudo-file; *lock* is accepted but unused."""
        return DatabaseStoredFile(self.db, filename, mode)

    def file_close(self, fileobj):
        """Flush the pseudo-file's buffer back into the database."""
        fileobj.close_connection()

    def file_delete(self, filename):
        """Remove *filename*'s row from the web2py_filesystem table."""
        self.db.executesql(
            "DELETE FROM web2py_filesystem WHERE path='%s'" % filename)
        self.db.commit()
4321
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
    """Adapter for Google Cloud SQL on App Engine.

    Speaks MySQL (via the GAE `rdbms` driver) and stores .table
    migration metadata in the database (UseDatabaseStoredFile mixin),
    since GAE offers no writable filesystem.
    """
    uploads_in_blob = True

    # google:sql://instance/database
    REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the google:sql:// uri and prepare an rdbms connector.

        adapter_args['createdb'] (default True) controls whether
        after_connection() creates/selects the database itself.
        """
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.db_codec = db_codec
        self._after_connection = after_connection
        # derive a pseudo work folder from the current application path
        self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
                os.sep+'applications'+os.sep,1)[1])
        ruri = uri.split("://")[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
        instance = credential_decoder(m.group('instance'))
        self.dbstring = db = credential_decoder(m.group('db'))
        driver_args['instance'] = instance
        if not 'charset' in driver_args:
            driver_args['charset'] = 'utf8'
        self.createdb = createdb = adapter_args.get('createdb',True)
        if not createdb:
            driver_args['database'] = db
        def connector(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Create/select the target database when createdb is set, then
        apply the usual MySQL session settings."""
        if self.createdb:
            # self.execute('DROP DATABASE %s' % self.dbstring)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
            self.execute('USE %s' % self.dbstring)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def execute(self, command, *a, **b):
        # The rdbms driver expects unicode commands (Python 2 str.decode).
        return self.log_execute(command.decode('utf8'), *a, **b)
4367
class NoSQLAdapter(BaseAdapter):
    """Common base for non-relational backends (GAE datastore, CouchDB,
    MongoDB, ...).

    Overrides value representation, stubs out transactions, and raises
    SyntaxError for every SQL-only operation a NoSQL backend cannot
    express.
    """
    # SELECT ... FOR UPDATE has no NoSQL equivalent.
    can_select_for_update = False

    @staticmethod
    def to_unicode(obj):
        """Coerce *obj* to a Python 2 unicode string (utf8 for str)."""
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj

    def id_query(self, table):
        """A query matching every record of *table* (id > 0)."""
        return table._id > 0

    def represent(self, obj, fieldtype):
        """Convert a Python value into the backend's storage value for
        *fieldtype*.

        fieldtype is either a DAL type string ('integer', 'list:string',
        ...), an SQLCustomType, or — on GAE — a gae.Property instance.

        NOTE(review): `fieldtype.startswith` is captured before the
        isinstance(fieldtype, str) check, so a non-string fieldtype
        without a startswith attribute would raise here — confirm all
        callers pass one of the three forms above.
        """
        field_is_type = fieldtype.startswith
        if isinstance(obj, CALLABLETYPES):
            # lazy defaults: call to obtain the actual value
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            raise SyntaxError("non supported on GAE")
        if self.dbengine == 'google:datastore':
            if isinstance(fieldtype, gae.Property):
                # custom GAE property: store the value untouched
                return obj
        is_string = isinstance(fieldtype,str)
        is_list = is_string and field_is_type('list:')
        if is_list:
            # normalize list fields to a real list
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        if obj == '' and not \
                (is_string and fieldtype[:2] in ['st','te', 'pa','up']):
            # empty string means "no value" except for string-like fields
            # (string, text, password, upload)
            return None
        if not obj is None:
            if isinstance(obj, list) and not is_list:
                # scalar field given a list: represent each element
                obj = [self.represent(o, fieldtype) for o in obj]
            elif fieldtype in ('integer','bigint','id'):
                obj = long(obj)
            elif fieldtype == 'double':
                obj = float(obj)
            elif is_string and field_is_type('reference'):
                if isinstance(obj, (Row, Reference)):
                    obj = obj['id']
                obj = long(obj)
            elif fieldtype == 'boolean':
                # anything not starting with '0' or 'F' is True
                if obj and not str(obj)[0].upper() in '0F':
                    obj = True
                else:
                    obj = False
            elif fieldtype == 'date':
                if not isinstance(obj, datetime.date):
                    # parse 'YYYY-MM-DD'
                    (y, m, d) = map(int,str(obj).strip().split('-'))
                    obj = datetime.date(y, m, d)
                elif isinstance(obj,datetime.datetime):
                    # truncate datetime to its date part
                    (y, m, d) = (obj.year, obj.month, obj.day)
                    obj = datetime.date(y, m, d)
            elif fieldtype == 'time':
                if not isinstance(obj, datetime.time):
                    # parse 'HH:MM[:SS]' (missing seconds default to 0)
                    time_items = map(int,str(obj).strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    obj = datetime.time(h, mi, s)
            elif fieldtype == 'datetime':
                if not isinstance(obj, datetime.datetime):
                    # parse 'YYYY-MM-DD[ HH:MM:SS]', padding missing parts
                    (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                    time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                    while len(time_items)<3:
                        time_items.append(0)
                    (h, mi, s) = time_items
                    obj = datetime.datetime(y, m, d, h, mi, s)
            elif fieldtype == 'blob':
                pass  # stored as-is
            elif fieldtype == 'json':
                # decode JSON text into Python structures
                if isinstance(obj, basestring):
                    obj = self.to_unicode(obj)
                if have_serializers:
                    obj = serializers.loads_json(obj)
                elif simplejson:
                    obj = simplejson.loads(obj)
                else:
                    raise RuntimeError("missing simplejson")
            elif is_string and field_is_type('list:string'):
                return map(self.to_unicode,obj)
            elif is_list:
                # list:integer / list:reference
                return map(int,obj)
            else:
                obj = self.to_unicode(obj)
        return obj

    # The _insert/_count/_select/_delete/_update methods below only build
    # human-readable descriptions (used for db._lastsql); the real work is
    # done by the subclass's insert/count/select/delete/update.
    def _insert(self,table,fields):
        return 'insert %s in %s' % (fields, table)

    def _count(self,query,distinct=None):
        return 'count %s' % repr(query)

    def _select(self,query,fields,attributes):
        return 'select %s where %s' % (repr(fields), repr(query))

    def _delete(self,tablename, query):
        return 'delete %s where %s' % (repr(tablename),repr(query))

    def _update(self,tablename,query,fields):
        return 'update %s (%s) where %s' % (repr(tablename),
                                            repr(fields),repr(query))

    def commit(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def rollback(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def close_connection(self):
        """
        remember: no transactions on many NoSQL
        """
        pass


    # these functions should never be called!
    def OR(self,first,second): raise SyntaxError("Not supported")
    def AND(self,first,second): raise SyntaxError("Not supported")
    def AS(self,first,second): raise SyntaxError("Not supported")
    def ON(self,first,second): raise SyntaxError("Not supported")
    def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ADD(self,first,second): raise SyntaxError("Not supported")
    def SUB(self,first,second): raise SyntaxError("Not supported")
    def MUL(self,first,second): raise SyntaxError("Not supported")
    def DIV(self,first,second): raise SyntaxError("Not supported")
    def LOWER(self,first): raise SyntaxError("Not supported")
    def UPPER(self,first): raise SyntaxError("Not supported")
    def EXTRACT(self,first,what): raise SyntaxError("Not supported")
    def LENGTH(self, first): raise SyntaxError("Not supported")
    def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
    def LEFT_JOIN(self): raise SyntaxError("Not supported")
    def RANDOM(self): raise SyntaxError("Not supported")
    def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
    def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
    def ILIKE(self,first,second): raise SyntaxError("Not supported")
    def drop(self,table,mode): raise SyntaxError("Not supported")
    def alias(self,table,alias): raise SyntaxError("Not supported")
    def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
    def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
    def prepare(self,key): raise SyntaxError("Not supported")
    def commit_prepared(self,key): raise SyntaxError("Not supported")
    def rollback_prepared(self,key): raise SyntaxError("Not supported")
    def concat_add(self,table): raise SyntaxError("Not supported")
    def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
    def create_sequence_and_triggers(self, query, table, **args): pass
    def log_execute(self,*a,**b): raise SyntaxError("Not supported")
    def execute(self,*a,**b): raise SyntaxError("Not supported")
    def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
    def lastrowid(self,table): raise SyntaxError("Not supported")
    def integrity_error_class(self): raise SyntaxError("Not supported")
    def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
4533
class GAEF(object):
    """A single Google Datastore filter.

    Holds the field name, comparison operator, comparison value, and an
    ``apply`` callable that attaches the filter to a datastore query.
    """

    def __init__(self, name, op, value, apply):
        # The datastore addresses the primary key as '__key__'.
        self.name = '__key__' if name == 'id' else name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (
            self.name, self.op, repr(self.value), type(self.value))
4543
class GoogleDatastoreAdapter(NoSQLAdapter):
    """Adapter for the Google App Engine datastore (google:datastore)."""
    # Upload fields keep the blob content itself: GAE has no filesystem.
    uploads_in_blob = True
    # Filled per-instance in __init__ with gae.Property factories.
    types = {}

    # Migration metadata files are meaningless on the datastore: no-ops.
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    # Extracts the optional datastore namespace from the connection uri.
    REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Set up the GAE datastore adapter.

        There is no connection to open: this only populates the
        DAL-type -> gae.Property map and selects the datastore
        namespace when one is encoded in the uri. pool_size and
        db_codec arguments are ignored (forced to 0 / 'UTF-8').
        """
        self.types.update({
                'boolean': gae.BooleanProperty,
                'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
                'text': gae.TextProperty,
                'json': gae.TextProperty,
                'password': gae.StringProperty,
                'blob': gae.BlobProperty,
                'upload': gae.StringProperty,
                'integer': gae.IntegerProperty,
                'bigint': gae.IntegerProperty,
                'float': gae.FloatProperty,
                'double': gae.FloatProperty,
                'decimal': GAEDecimalProperty,
                'date': gae.DateProperty,
                'time': gae.TimeProperty,
                'datetime': gae.DateTimeProperty,
                'id': None,  # the datastore key itself serves as the id
                'reference': gae.IntegerProperty,
                'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
                'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = 0  # no connection pooling on the datastore
        match = self.REGEX_NAMESPACE.match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))
4590
    def parse_id(self, value, field_type):
        """Datastore ids are already integers; return them unchanged."""
        return value
    def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
        """Build the GAE Model class for *table* and store it in
        table._tableobj (no schema migration exists on the datastore).

        polymodel may be None (plain gae.Model), True (PolyModel) or a
        parent Table whose _tableobj is subclassed.
        """
        myfields = {}
        for field in table:
            # fields inherited from a parent poly-model are not redeclared
            if isinstance(polymodel,Table) and field.name in polymodel.fields():
                continue
            attr = {}
            if isinstance(field.custom_qualifier, dict):
                #this is custom properties to add to the GAE field declartion
                attr = field.custom_qualifier
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = self.types[field_type.native or field_type.type](**attr)
            elif isinstance(field_type, gae.Property):
                # an already-built GAE property is used as-is
                ftype = field_type
            elif field_type.startswith('id'):
                # the id is implicit in the datastore key
                continue
            elif field_type.startswith('decimal'):
                precision, scale = field_type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                ftype = GAEDecimalProperty(precision, scale, **attr)
            elif field_type.startswith('reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field_type[10:].strip()
                ftype = self.types[field_type[:9]](referenced, **attr)
            elif field_type.startswith('list:reference'):
                if field.notnull:
                    attr['required'] = True
                # NOTE(review): 'referenced' is computed but never used here
                referenced = field_type[15:].strip()
                ftype = self.types[field_type[:14]](**attr)
            elif field_type.startswith('list:'):
                ftype = self.types[field_type](**attr)
            elif not field_type in self.types\
                 or not self.types[field_type]:
                raise SyntaxError('Field: unknown field type: %s' % field_type)
            else:
                ftype = self.types[field_type](**attr)
            myfields[field.name] = ftype
        if not polymodel:
            table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
        elif polymodel==True:
            table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
        elif isinstance(polymodel,Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError("polymodel must be None, True, a table or a tablename")
        return None
    def expand(self,expression,field_type=None):
        """Recursively expand a DAL expression into GAE terms.

        Fields expand to their name, Query/Expression nodes dispatch to
        their op method (which builds GAEF filter lists), and literal
        values are converted with represent().
        """
        if isinstance(expression,Field):
            # text/blob/json properties are not indexed, so they cannot be
            # used in a query
            if expression.type in ('text', 'blob', 'json'):
                raise SyntaxError('AppEngine does not index by: %s' % expression.type)
            return expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) for item in expression])
        else:
            return str(expression)

    ### TODO from gql.py Expression
4663 - def AND(self,first,second):
4664 a = self.expand(first) 4665 b = self.expand(second) 4666 if b[0].name=='__key__' and a[0].name!='__key__': 4667 return b+a 4668 return a+b
4669
4670 - def EQ(self,first,second=None):
4671 if isinstance(second, Key): 4672 return [GAEF(first.name,'=',second,lambda a,b:a==b)] 4673 return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
4674
4675 - def NE(self,first,second=None):
4676 if first.type != 'id': 4677 return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] 4678 else: 4679 if not second is None: 4680 second = Key.from_path(first._tablename, long(second)) 4681 return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
4682
4683 - def LT(self,first,second=None):
4684 if first.type != 'id': 4685 return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] 4686 else: 4687 second = Key.from_path(first._tablename, long(second)) 4688 return [GAEF(first.name,'<',second,lambda a,b:a<b)]
4689
4690 - def LE(self,first,second=None):
4691 if first.type != 'id': 4692 return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] 4693 else: 4694 second = Key.from_path(first._tablename, long(second)) 4695 return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
4696
4697 - def GT(self,first,second=None):
4698 if first.type != 'id' or second==0 or second == '0': 4699 return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] 4700 else: 4701 second = Key.from_path(first._tablename, long(second)) 4702 return [GAEF(first.name,'>',second,lambda a,b:a>b)]
4703
4704 - def GE(self,first,second=None):
4705 if first.type != 'id': 4706 return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] 4707 else: 4708 second = Key.from_path(first._tablename, long(second)) 4709 return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
4710
    def INVERT(self,first):
        """Return the descending-order token for *first* (used by orderby)."""
        return '-%s' % first.name
    def COMMA(self,first,second):
        """Join two expanded expressions with a comma (orderby lists)."""
        return '%s, %s' % (self.expand(first),self.expand(second))
    def BELONGS(self,first,second=None):
        """Build a GAE 'in' filter; id values are converted to Keys."""
        if not isinstance(second,(list, tuple)):
            raise SyntaxError("Not supported")
        if first.type != 'id':
            return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)]
        else:
            second = [Key.from_path(first._tablename, int(i)) for i in second]
            return [GAEF(first.name,'in',second,lambda a,b:a in b)]
    def CONTAINS(self,first,second,case_sensitive=False):
        """Build an '=' filter expressing datastore list containment;
        only works on list: field types."""
        # silently ignoring: GAE can only do case sensitive matches!
        if not first.type.startswith('list:'):
            raise SyntaxError("Not supported")
        return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
    def NOT(self,first):
        """Negate a simple comparison query by swapping its operator
        (EQ<->NE, LT<->GE, GT<->LE); anything else is unsupported."""
        nops = { self.EQ: self.NE,
                 self.NE: self.EQ,
                 self.LT: self.GE,
                 self.GT: self.LE,
                 self.LE: self.GT,
                 self.GE: self.LT}
        if not isinstance(first,Query):
            raise SyntaxError("Not suported")
        nop = nops.get(first.op,None)
        if not nop:
            raise SyntaxError("Not suported %s" % first.op.__name__)
        # NOTE(review): mutates first.op in place before re-expanding
        first.op = nop
        return self.expand(first)
    def truncate(self,table,mode):
        """Delete every record of *table* (mode is ignored on GAE)."""
        self.db(self.db._adapter.id_query(table)).delete()
4750 - def select_raw(self,query,fields=None,attributes=None):
4751 db = self.db 4752 fields = fields or [] 4753 attributes = attributes or {} 4754 args_get = attributes.get 4755 new_fields = [] 4756 for item in fields: 4757 if isinstance(item,SQLALL): 4758 new_fields += item._table 4759 else: 4760 new_fields.append(item) 4761 fields = new_fields 4762 if query: 4763 tablename = self.get_table(query) 4764 elif fields: 4765 tablename = fields[0].tablename 4766 query = db._adapter.id_query(fields[0].table) 4767 else: 4768 raise SyntaxError("Unable to determine a tablename") 4769 4770 if query: 4771 if use_common_filters(query): 4772 query = self.common_filter(query,[tablename]) 4773 4774 #tableobj is a GAE Model class (or subclass) 4775 tableobj = db[tablename]._tableobj 4776 filters = self.expand(query) 4777 4778 projection = None 4779 if len(db[tablename].fields) == len(fields): 4780 #getting all fields, not a projection query 4781 projection = None 4782 elif args_get('projection') == True: 4783 projection = [] 4784 for f in fields: 4785 if f.type in ['text', 'blob', 'json']: 4786 raise SyntaxError( 4787 "text and blob field types not allowed in projection queries") 4788 else: 4789 projection.append(f.name) 4790 elif args_get('filterfields') == True: 4791 projection = [] 4792 for f in fields: 4793 projection.append(f.name) 4794 4795 # real projection's can't include 'id'. 
4796 # it will be added to the result later 4797 query_projection = [ 4798 p for p in projection if \ 4799 p != db[tablename]._id.name] if projection and \ 4800 args_get('projection') == True\ 4801 else None 4802 4803 cursor = None 4804 if isinstance(args_get('reusecursor'), str): 4805 cursor = args_get('reusecursor') 4806 items = gae.Query(tableobj, projection=query_projection, 4807 cursor=cursor) 4808 4809 for filter in filters: 4810 if args_get('projection') == True and \ 4811 filter.name in query_projection and \ 4812 filter.op in ['=', '<=', '>=']: 4813 raise SyntaxError( 4814 "projection fields cannot have equality filters") 4815 if filter.name=='__key__' and filter.op=='>' and filter.value==0: 4816 continue 4817 elif filter.name=='__key__' and filter.op=='=': 4818 if filter.value==0: 4819 items = [] 4820 elif isinstance(filter.value, Key): 4821 # key qeuries return a class instance, 4822 # can't use projection 4823 # extra values will be ignored in post-processing later 4824 item = tableobj.get(filter.value) 4825 items = (item and [item]) or [] 4826 else: 4827 # key qeuries return a class instance, 4828 # can't use projection 4829 # extra values will be ignored in post-processing later 4830 item = tableobj.get_by_id(filter.value) 4831 items = (item and [item]) or [] 4832 elif isinstance(items,list): # i.e. there is a single record! 4833 items = [i for i in items if filter.apply( 4834 getattr(item,filter.name),filter.value)] 4835 else: 4836 if filter.name=='__key__' and filter.op != 'in': 4837 items.order('__key__') 4838 items = items.filter('%s %s' % (filter.name,filter.op), 4839 filter.value) 4840 if not isinstance(items,list): 4841 if args_get('left', None): 4842 raise SyntaxError('Set: no left join in appengine') 4843 if args_get('groupby', None): 4844 raise SyntaxError('Set: no groupby in appengine') 4845 orderby = args_get('orderby', False) 4846 if orderby: 4847 ### THIS REALLY NEEDS IMPROVEMENT !!! 
4848 if isinstance(orderby, (list, tuple)): 4849 orderby = xorify(orderby) 4850 if isinstance(orderby,Expression): 4851 orderby = self.expand(orderby) 4852 orders = orderby.split(', ') 4853 for order in orders: 4854 order={'-id':'-__key__','id':'__key__'}.get(order,order) 4855 items = items.order(order) 4856 if args_get('limitby', None): 4857 (lmin, lmax) = attributes['limitby'] 4858 (limit, offset) = (lmax - lmin, lmin) 4859 rows = items.fetch(limit,offset=offset) 4860 #cursor is only useful if there was a limit and we didn't return 4861 # all results 4862 if args_get('reusecursor'): 4863 db['_lastcursor'] = items.cursor() 4864 items = rows 4865 return (items, tablename, projection or db[tablename].fields)
4866
    def select(self,query,fields,attributes):
        """
        This is the GAE version of select. some notes to consider:
         - db['_lastsql'] is not set because there is not SQL statement string
           for a GAE query
         - 'nativeRef' is a magical fieldname used for self references on GAE
         - optional attribute 'projection' when set to True will trigger
           use of the GAE projection queries.  note that there are rules for
           what is accepted imposed by GAE: each field must be indexed,
           projection queries cannot contain blob or text fields, and you
           cannot use == and also select that same field.  see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
         - optional attribute 'filterfields' when set to True web2py will only
           parse the explicitly listed fields into the Rows object, even though
           all fields are returned in the query.  This can be used to reduce
           memory usage in cases where true projection queries are not
           usable.
         - optional attribute 'reusecursor' allows use of cursor with queries
           that have the limitby attribute.  Set the attribute to True for the
           first query, set it to the value of db['_lastcursor'] to continue
           a previous query.  The user must save the cursor value between
           requests, and the filters must be identical.  It is up to the user
           to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
        """

        (items, tablename, fields) = self.select_raw(query,fields,attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        # id and self-reference columns yield the entity itself; everything
        # else is read off the entity by attribute name
        rows = [[(t==self.db[tablename]._id.name and item) or \
                 (t=='nativeRef' and item) or getattr(item, t) \
                     for t in fields] for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)
4900 - def count(self,query,distinct=None,limit=None):
4901 if distinct: 4902 raise RuntimeError("COUNT DISTINCT not supported") 4903 (items, tablename, fields) = self.select_raw(query) 4904 # self.db['_lastsql'] = self._count(query) 4905 try: 4906 return len(items) 4907 except TypeError: 4908 return items.count(limit=limit)
4909
    def delete(self,tablename, query):
        """
        This function was changed on 2010-05-04 because according to
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer supports deleting more than 1000 records.

        Returns the number of deleted entities.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items,list):
            #use a keys_only query to ensure that this runs as a datastore
            # small operations
            leftitems = items.fetch(1000, keys_only=True)
            counter = 0
            while len(leftitems):
                counter += len(leftitems)
                gae.delete(leftitems)
                leftitems = items.fetch(1000, keys_only=True)
        else:
            counter = len(items)
            gae.delete(items)
        return counter
    def update(self,tablename,query,update_fields):
        """Update all entities matching *query*; returns how many were put."""
        # self.db['_lastsql'] = self._update(tablename,query,update_fields)
        (items, tablename, fields) = self.select_raw(query)
        counter = 0
        for item in items:
            for field, value in update_fields:
                setattr(item, field.name, self.represent(value,field.type))
            item.put()
            counter += 1
        LOGGER.info(str(counter))
        return counter
    def insert(self,table,fields):
        """Store a new entity; returns its id wrapped in a Reference that
        also carries the datastore key as _gaekey."""
        dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
        # table._db['_lastsql'] = self._insert(table,fields)
        tmp = table._tableobj(**dfields)
        tmp.put()
        rid = Reference(tmp.key().id())
        (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key())
        return rid
4954 - def bulk_insert(self,table,items):
4955 parsed_items = [] 4956 for item in items: 4957 dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) 4958 parsed_items.append(table._tableobj(**dfields)) 4959 gae.put(parsed_items) 4960 return True
4961
def uuid2int(uuidv):
    """Convert a uuid string into its 128-bit integer value."""
    parsed = uuid.UUID(uuidv)
    return parsed.int
def int2uuid(n):
    """Convert a 128-bit integer back into its canonical uuid string."""
    value = uuid.UUID(int=n)
    return str(value)
class CouchDBAdapter(NoSQLAdapter):
    """DAL adapter for CouchDB (couchdb:// uris, experimental)."""
    drivers = ('couchdb',)

    # uploads are stored inside documents rather than on a filesystem
    uploads_in_blob = True
    # mapping of DAL field types to the python types used when parsing
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
                }

    # no filesystem migration bookkeeping files are used for CouchDB
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass
    def expand(self,expression,field_type=None):
        """Expand to a javascript expression; the DAL id field maps to the
        CouchDB document's _id."""
        if isinstance(expression,Field):
            if expression.type=='id':
                return "%s._id" % expression.tablename
        return BaseAdapter.expand(self,expression,field_type)
5004 - def AND(self,first,second):
5005 return '(%s && %s)' % (self.expand(first),self.expand(second))
5006
5007 - def OR(self,first,second):
5008 return '(%s || %s)' % (self.expand(first),self.expand(second))
5009
5010 - def EQ(self,first,second):
5011 if second is None: 5012 return '(%s == null)' % self.expand(first) 5013 return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
5014
5015 - def NE(self,first,second):
5016 if second is None: 5017 return '(%s != null)' % self.expand(first) 5018 return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
5019
5020 - def COMMA(self,first,second):
5021 return '%s + %s' % (self.expand(first),self.expand(second))
5022
    def represent(self, obj, fieldtype):
        """Serialize *obj* into the string form stored in a CouchDB doc."""
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype=='id':
            # ids are stored as the repr of their decimal string
            return repr(str(long(value)))
        elif fieldtype in ('date','time','datetime','boolean'):
            return serializers.json(value)
        # plain strings are repr()'ed; unicode is utf8-encoded first
        return repr(not isinstance(value,unicode) and value \
                        or value and value.encode('utf8'))
    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to the CouchDB server at http://<host:port> taken from
        the uri after the couchdb:// prefix.

        Note: db_codec is ignored and forced to UTF-8 below.
        """
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size

        # strip the 'couchdb://' prefix and talk plain http to the server
        url='http://'+uri[10:]
        def connector(url=url,driver_args=driver_args):
            return self.driver.Server(url,**driver_args)
        self.reconnect(connector,cursor=False)
5051 - def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
5052 if migrate: 5053 try: 5054 self.connection.create(table._tablename) 5055 except: 5056 pass
5057
    def insert(self,table,fields):
        """Save a new document keyed by a random uuid-derived integer;
        returns that integer id."""
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id
    def _select(self,query,fields,attributes):
        """Build the javascript map function for *query*; returns
        (function_source, colnames)."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        def uid(fd):
            # the DAL 'id' field is stored as couch '_id'
            return fd=='id' and '_id' or fd
        # NOTE(review): 'get' is defined but never used in this method
        def get(row,fd):
            return fd=='id' and long(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        # emit(doc._id, [fields]) for every doc matching the expanded query
        fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames
    def select(self,query,fields,attributes):
        """Run the map function built by _select() and parse the rows."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        fn, colnames = self._select(query,fields,attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        rows = [cols['value'] for cols in ctable.query(fn)]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)
    def delete(self,tablename,query):
        """Delete matching documents, with a fast path for delete-by-id;
        returns the number of deleted documents."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            # direct delete of a single document addressed by its _id
            id = query.second
            tablename = query.first.tablename
            # NOTE(review): this assert compares tablename with itself and
            # can never fail
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # generic path: select the matching ids, then delete one by one
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)
    def update(self,tablename,query,fields):
        """Update matching documents, with a fast path for update-by-id;
        returns the number of updated documents."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            # direct update of a single document addressed by its _id
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # generic path: select the matching ids, then update one by one
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,table[key.name].type)
                ctable.save(doc)
            return len(rows)
5151 - def count(self,query,distinct=None):
5152 if distinct: 5153 raise RuntimeError("COUNT DISTINCT not supported") 5154 if not isinstance(query,Query): 5155 raise SyntaxError("Not Supported") 5156 tablename = self.get_table(query) 5157 rows = self.select(query,[self.db[tablename]._id],{}) 5158 return len(rows)
5159
def cleanup(text):
    """
    validates that the given text is clean: only contains [0-9a-zA-Z_]
    """
    if REGEX_ALPHANUMERIC.match(text):
        return text
    raise SyntaxError('invalid table or field name: %s' % text)
class MongoDBAdapter(NoSQLAdapter):
    """DAL adapter for MongoDB via pymongo (mongodb:// uris)."""
    # documents are json-native; no extra json serialization round-trip
    native_json = True
    drivers = ('pymongo',)

    # uploads are stored inside documents rather than on a filesystem
    uploads_in_blob = True

    # mapping of DAL field types to the python types used when parsing
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
                }

    error_messages = {"javascript_needed": "This must yet be replaced" +
                      " with javascript in order to work."}
    def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
                 pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the mongodb:// uri and set up a (re)connector to the named
        database.

        adapter_args:
          minimumreplication -- minimum replicas to wait for on writes
          safe -- default write-concern flag for insert/update/delete
        """
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        import random
        from bson.objectid import ObjectId
        from bson.son import SON
        import pymongo.uri_parser

        m = pymongo.uri_parser.parse_uri(uri)

        self.SON = SON
        self.ObjectId = ObjectId
        self.random = random

        self.dbengine = 'mongodb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size
        #this is the minimum amount of replicates that it should wait
        # for on insert/update
        self.minimumreplication = adapter_args.get('minimumreplication',0)
        # by default all inserts and selects are performand asynchronous,
        # but now the default is
        # synchronous, except when overruled by either this default or
        # function parameter
        self.safe = adapter_args.get('safe',True)

        if isinstance(m,tuple):
            m = {"database" : m[1]}
        if m.get('database')==None:
            raise SyntaxError("Database is required!")
        def connector(uri=self.uri,m=m):
            try:
                # Connection() is deprecated
                if hasattr(self.driver, "MongoClient"):
                    Connection = self.driver.MongoClient
                else:
                    Connection = self.driver.Connection
                return Connection(uri)[m.get('database')]
            except self.driver.errors.ConnectionFailure:
                inst = sys.exc_info()[1]
                raise SyntaxError("The connection to " +
                                  uri + " could not be made")

        self.reconnect(connector,cursor=False)
    def object_id(self, arg=None):
        """ Convert input to a valid Mongodb ObjectId instance

        self.object_id("<random>") -> ObjectId (not unique) instance

        Accepts None/0, a decimal or base-16 string, "<random>", an int/long,
        or an existing ObjectId (returned unchanged).
        """
        if not arg:
            arg = 0
        if isinstance(arg, basestring):
            # we assume an integer as default input
            rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
            if arg.isdigit() and (not rawhex):
                arg = int(arg)
            elif arg == "<random>":
                # build a random (not necessarily unique) 24-hex-digit value
                arg = int("0x%sL" % \
                          "".join([self.random.choice("0123456789abcdef") \
                                       for x in range(24)]), 0)
            elif arg.isalnum():
                if not arg.startswith("0x"):
                    arg = "0x%s" % arg
                try:
                    arg = int(arg, 0)
                except ValueError, e:
                    raise ValueError(
                        "invalid objectid argument string: %s" % e)
            else:
                raise ValueError("Invalid objectid argument string. " +
                                 "Requires an integer or base 16 value")
        elif isinstance(arg, self.ObjectId):
            return arg

        if not isinstance(arg, (int, long)):
            raise TypeError("object_id argument must be of type " +
                            "ObjectId or an objectid representable integer")
        if arg == 0:
            hexvalue = "".zfill(24)
        else:
            hexvalue = hex(arg)[2:].replace("L", "")
        return self.ObjectId(hexvalue)
    def parse_reference(self, value, field_type):
        """Convert an ObjectId reference to a long before generic parsing."""
        # here we have to check for ObjectID before base parse
        if isinstance(value, self.ObjectId):
            value = long(str(value), 16)
        return super(MongoDBAdapter,
                     self).parse_reference(value, field_type)
    def parse_id(self, value, field_type):
        """Convert an ObjectId id to a long before generic parsing."""
        if isinstance(value, self.ObjectId):
            value = long(str(value), 16)
        return super(MongoDBAdapter,
                     self).parse_id(value, field_type)
    def represent(self, obj, fieldtype):
        """Serialize *obj* into the python value stored in a Mongo doc."""
        # the base adapter does not support MongoDB ObjectId
        if isinstance(obj, self.ObjectId):
            value = obj
        else:
            value = NoSQLAdapter.represent(self, obj, fieldtype)
        # reference types must be converted to ObjectId
        if fieldtype =='date':
            if value == None:
                return value
            # this piece of data can be stripped off based on the fieldtype
            t = datetime.time(0, 0, 0)
            # mongodb doesn't have a date object, so it must be datetime,
            # string or integer
            return datetime.datetime.combine(value, t)
        elif fieldtype == 'time':
            if value == None:
                return value
            # this piece of data can be stripped off based on the fieldtype
            d = datetime.date(2000, 1, 1)
            # mongodb doesn't have a time object, so it must be datetime,
            # string or integer
            return datetime.datetime.combine(d, value)
        elif (isinstance(fieldtype, basestring) and
              fieldtype.startswith('list:')):
            if fieldtype.startswith('list:reference'):
                newval = []
                for v in value:
                    newval.append(self.object_id(v))
                return newval
            return value
        elif ((isinstance(fieldtype, basestring) and
               fieldtype.startswith("reference")) or
              (isinstance(fieldtype, Table))):
            value = self.object_id(value)

        return value

    # Safe determines whether an asynchronous request is done or a
    # synchronous action is done
    # For safety, we use by default synchronous requests
    def insert(self, table, fields, safe=None):
        """Insert a document; returns the new _id as a long.

        safe: write-concern flag; defaults to the adapter-wide setting.
        """
        if safe==None:
            safe = self.safe
        ctable = self.connection[table._tablename]
        values = dict()
        for k, v in fields:
            # 'id' and 'safe' are never stored as ordinary fields
            if not k.name in ["id", "safe"]:
                fieldname = k.name
                fieldtype = table[k.name].type
                if ("reference" in fieldtype) or (fieldtype=="id"):
                    values[fieldname] = self.object_id(v)
                else:
                    values[fieldname] = self.represent(v, fieldtype)
        # the driver fills in values['_id'] on insert
        ctable.insert(values, safe=safe)
        return long(str(values['_id']), 16)
    def create_table(self, table, migrate=True, fake_migrate=False,
                     polymodel=None, isCapped=False):
        """MongoDB is schemaless, so there is nothing to create; capped
        collections are not implemented."""
        if isCapped:
            raise RuntimeError("Not implemented")
    def count(self, query, distinct=None, snapshot=True):
        """Count documents matching *query* via a counting select()."""
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        tablename = self.get_table(query)
        return long(self.select(query,[self.db[tablename]._id], {},
                                count=True,snapshot=snapshot)['count'])

    # Maybe it would be faster if we just implemented the pymongo
    # .count() function which is probably quicker?
    # therefor call __select() connection[table].find(query).count()
    # Since this will probably reduce the return set?
    def expand(self, expression, field_type=None):
        """Recursively expand a DAL expression into a pymongo query term."""
        if isinstance(expression, Query):
            # any query using 'id':=
            #    set name as _id (as per pymongo/mongodb primary key)
            #    convert second arg to an objectid field
            #    (if its not already)
            #    if second arg is 0 convert to objectid
            if isinstance(expression.first,Field) and \
                    ((expression.first.type == 'id') or \
                    ("reference" in expression.first.type)):
                if expression.first.type == 'id':
                    expression.first.name = '_id'
                # cast to Mongo ObjectId
                if isinstance(expression.second, (tuple, list, set)):
                    expression.second = [self.object_id(item) for
                                         item in expression.second]
                else:
                    expression.second = self.object_id(expression.second)
                # NOTE(review): for id/reference queries expression.op is
                # invoked here and then again in the branch below, and this
                # result is discarded
                result = expression.op(expression.first, expression.second)

        if isinstance(expression, Field):
            if expression.type=='id':
                result = "_id"
            else:
                result = expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                result = expression.op(expression.first, expression.second)
            elif not expression.first is None:
                result = expression.op(expression.first)
            elif not isinstance(expression.op, str):
                result = expression.op()
            else:
                result = expression.op
        elif field_type:
            result = self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            result = ','.join(self.represent(item,field_type) for
                              item in expression)
        else:
            result = expression
        return result
    def _select(self, query, fields, attributes):
        """Translate a DAL select into
        (tablename, mongo query dict, field projection SON, sort list,
        limit, skip)."""
        if 'for_update' in attributes:
            logging.warn('mongodb does not support for_update')
        for key in set(attributes.keys())-set(('limitby',
                                               'orderby','for_update')):
            if attributes[key]!=None:
                logging.warn('select attribute not implemented: %s' % key)

        new_fields=[]
        mongosort_list = []

        # try an orderby attribute
        orderby = attributes.get('orderby', False)
        limitby = attributes.get('limitby', False)
        # distinct = attributes.get('distinct', False)
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)

            # !!!! need to add 'random'
            # a leading '-' on a field name means descending order
            for f in self.expand(orderby).split(','):
                if f.startswith('-'):
                    mongosort_list.append((f[1:], -1))
                else:
                    mongosort_list.append((f, 1))

        if limitby:
            limitby_skip, limitby_limit = limitby
        else:
            limitby_skip = limitby_limit = 0

        mongofields_dict = self.SON()
        mongoqry_dict = {}
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if isinstance(query,Query):
            tablename = self.get_table(query)
        elif len(fields) != 0:
            tablename = fields[0].tablename
        else:
            raise SyntaxError("The table name could not be found in " +
                              "the query nor from the select statement.")
        mongoqry_dict = self.expand(query)
        fields = fields or self.db[tablename]
        for field in fields:
            mongofields_dict[field.name] = 1

        return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
            limitby_limit, limitby_skip
    def select(self, query, fields, attributes, count=False,
               snapshot=False):
        """Run the query built by _select(); returns parsed Rows, or a
        {'count': n} dict when count=True."""
        # TODO: support joins
        tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
            limitby_limit, limitby_skip = self._select(query, fields, attributes)
        ctable = self.connection[tablename]

        if count:
            return {'count' : ctable.find(
                    mongoqry_dict, mongofields_dict,
                    skip=limitby_skip, limit=limitby_limit,
                    sort=mongosort_list, snapshot=snapshot).count()}
        else:
            # pymongo cursor object
            mongo_list_dicts = ctable.find(mongoqry_dict,
                                           mongofields_dict, skip=limitby_skip,
                                           limit=limitby_limit, sort=mongosort_list,
                                           snapshot=snapshot)
            rows = []
            # populate row in proper order
            # Here we replace ._id with .id to follow the standard naming
            colnames = []
            newnames = []
            for field in fields:
                colname = str(field)
                colnames.append(colname)
                tablename, fieldname = colname.split(".")
                if fieldname == "_id":
                    # Mongodb reserved uuid key
                    field.name = "id"
                newnames.append(".".join((tablename, field.name)))

            for record in mongo_list_dicts:
                row=[]
                for colname in colnames:
                    tablename, fieldname = colname.split(".")
                    # switch to Mongo _id uuids for retrieving
                    # record id's
                    if fieldname == "id": fieldname = "_id"
                    if fieldname in record:
                        value = record[fieldname]
                    else:
                        # missing fields in a document parse to None
                        value = None
                    row.append(value)
                rows.append(row)

            processor = attributes.get('processor', self.parse)
            result = processor(rows, fields, newnames, False)
            return result
def INVERT(self, first):
    """Render a descending sort key for orderby: '-<expanded field>'."""
    descending = self.expand(first)
    return '-%s' % descending
5530
def drop(self, table, mode=''):
    """Drop the MongoDB collection backing *table* (*mode* is ignored)."""
    self.connection[table._tablename].drop()
5534 5535
def truncate(self, table, mode, safe=None):
    """Remove every document from the collection backing *table*.

    NOTE(review): *safe* is accepted for interface compatibility but
    the remove() call has always forced safe=True; the original also
    normalized self.safe into a local it never used -- that dead code
    is removed here without changing behavior.
    """
    ctable = self.connection[table._tablename]
    ctable.remove(None, safe=True)
5541
def oupdate(self, tablename, query, fields):
    """Build the (modify, filter) pair for a MongoDB update().

    *fields* is an iterable of (Field, value) pairs; the values are
    serialized with represent() into a single '$set' document. The
    filter is the expanded query (None for a falsy query).
    Raises SyntaxError when *query* is not a Query.
    """
    if not isinstance(query, Query):
        raise SyntaxError("Not Supported")
    filter = None
    if query:
        filter = self.expand(query)
    set_doc = dict((field.name, self.represent(value, field.type))
                   for field, value in fields)
    return {'$set': set_doc}, filter
5551
def update(self, tablename, query, fields, safe=None):
    """Update documents matching *query* with the (Field, value) pairs
    in *fields*; returns the number of affected rows.

    In safe mode the driver-reported count ("n") is returned when
    available; otherwise (and always in unsafe mode) the pre-update
    match count is returned instead.
    Raises RuntimeError for non-Query filters or driver failures.
    """
    if safe == None:
        safe = self.safe
    # return amount of adjusted rows or zero, but no exceptions
    # related to not finding the result
    if not isinstance(query, Query):
        raise RuntimeError("Not implemented")
    # count before updating: used as the fallback return value
    amount = self.count(query, False)
    modify, filter = self.oupdate(tablename, query, fields)
    try:
        result = self.connection[tablename].update(filter,
                     modify, multi=True, safe=safe)
        if safe:
            try:
                # if a result count is available, fetch it
                return result["n"]
            except (KeyError, AttributeError, TypeError):
                return amount
        else:
            return amount
    except Exception, e:
        # TODO: reverse the update query to verify that it succeeded
        raise RuntimeError("uncaught exception when updating rows: %s" % e)
5575 5576 #this function returns a dict with the where clause and update fields
5577 - def _update(self,tablename,query,fields):
5578 return str(self.oupdate(tablename, query, fields))
5579
def delete(self, tablename, query, safe=None):
    """Delete documents matching *query*; returns how many matched.

    Fixes: the query type was validated only AFTER count() had
    already been called with it, and a dead ``amount = 0`` assignment
    preceded the real one -- validation now happens first.
    pymongo's remove() does not report a deletion count here, so the
    pre-delete match count is returned instead.
    """
    if safe is None:
        safe = self.safe
    # validate before doing any work with the query
    if not isinstance(query, Query):
        raise RuntimeError("query type %s is not supported" % \
                           type(query))
    amount = self.count(query, False)
    filter = self.expand(query)
    self._delete(tablename, filter, safe=safe)
    return amount
5591
5592 - def _delete(self, tablename, filter, safe=None):
5593 return self.connection[tablename].remove(filter, safe=safe)
5594
def bulk_insert(self, table, items):
    """Insert each item in turn via insert(); returns the new ids in
    input order (no native batch insert is used)."""
    inserted_ids = []
    for item in items:
        inserted_ids.append(self.insert(table, item))
    return inserted_ids
5597 5598 # TODO This will probably not work:(
def NOT(self, first):
    """Negate an expanded sub-query with MongoDB's $not operator."""
    return {'$not': self.expand(first)}
5603
def AND(self, first, second):
    """Conjunction: merge both expanded filter documents into one.

    Keys present in both operands are overwritten by *second*
    (dict.update semantics, as in the original implementation).
    """
    merged = self.expand(first)
    merged.update(self.expand(second))
    return merged
5609
def OR(self, first, second):
    """Disjunction via MongoDB's $or list form.

    pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]})
    """
    branches = [self.expand(first), self.expand(second)]
    return {'$or': branches}
5617
def BELONGS(self, first, second):
    """Translate the SQL IN operator into MongoDB's $in.

    An empty collection can never match, so the constant-false
    document {1: 0} is returned for it.
    NOTE(review): for a raw-string operand the original strips the
    last character (second[:-1]) -- presumably a nested-select
    artefact; preserved as-is.
    """
    if isinstance(second, str):
        return {self.expand(first): {"$in": [second[:-1]]}}
    if second == [] or second == () or second == set():
        return {1: 0}
    items = [self.expand(item, first.type) for item in second]
    return {self.expand(first): {"$in": items}}
5625
def EQ(self, first, second):
    """Equality filter: {<field>: <value>} (MongoDB's implicit $eq)."""
    return {self.expand(first): self.expand(second)}
5630
def NE(self, first, second=None):
    """Inequality filter via MongoDB's $ne (None compares as $ne null)."""
    return {self.expand(first): {'$ne': self.expand(second)}}
5635
def LT(self, first, second=None):
    """'<' comparison via $lt; None cannot be ordered and is rejected."""
    if second is None:
        raise RuntimeError("Cannot compare %s < None" % first)
    return {self.expand(first): {'$lt': self.expand(second)}}
5642
def LE(self, first, second=None):
    """'<=' comparison via $lte; None cannot be ordered and is rejected."""
    if second is None:
        raise RuntimeError("Cannot compare %s <= None" % first)
    return {self.expand(first): {'$lte': self.expand(second)}}
5649
def GT(self, first, second=None):
    """'>' comparison via $gt.

    Fixes an inconsistency: LT/LE/GE in this adapter all reject a
    None right-hand side (None cannot be ordered) but GT did not;
    the guard is added here with a backward-compatible default.
    """
    if second is None:
        raise RuntimeError("Cannot compare %s > None" % first)
    result = {}
    result[self.expand(first)] = {'$gt': self.expand(second)}
    return result
5654
def GE(self, first, second=None):
    """'>=' comparison via $gte; None cannot be ordered and is rejected."""
    if second is None:
        raise RuntimeError("Cannot compare %s >= None" % first)
    return {self.expand(first): {'$gte': self.expand(second)}}
5661
def ADD(self, first, second):
    """Arithmetic '+' is unsupported by this adapter (would need
    server-side javascript); always raises NotImplementedError.
    The unreachable SQL-rendering return after the raise is removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5666
def SUB(self, first, second):
    """Arithmetic '-' is unsupported by this adapter (would need
    server-side javascript); always raises NotImplementedError.
    The unreachable SQL-rendering return after the raise is removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5671
def MUL(self, first, second):
    """Arithmetic '*' is unsupported by this adapter (would need
    server-side javascript); always raises NotImplementedError.
    The unreachable SQL-rendering return after the raise is removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5676
def DIV(self, first, second):
    """Arithmetic '/' is unsupported by this adapter (would need
    server-side javascript); always raises NotImplementedError.
    The unreachable SQL-rendering return after the raise is removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5681
def MOD(self, first, second):
    """Arithmetic '%' is unsupported by this adapter (would need
    server-side javascript); always raises NotImplementedError.
    The unreachable SQL-rendering return after the raise is removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5686
def AS(self, first, second):
    """Column aliasing is unsupported by this adapter (would need
    server-side javascript); always raises NotImplementedError.
    The unreachable SQL-rendering return after the raise is removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5690 5691 # We could implement an option that simulates a full featured SQL 5692 # database. But I think the option should be set explicit or 5693 # implemented as another library.
def ON(self, first, second):
    """Joins are not possible in MongoDB; always raises
    NotImplementedError. (A full-SQL simulation, if ever wanted,
    belongs in a wrapper layer, not here.) The unreachable
    SQL-rendering return after the raise is removed.
    """
    raise NotImplementedError("This is not possible in NoSQL" +
                              " but can be simulated with a wrapper.")
5698 5699 # BLOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCITONS 5700 # WHICH ONE IS BEST? 5701
def COMMA(self, first, second):
    """Join two expanded expressions with ', ' (SQL-style list)."""
    left = self.expand(first)
    right = self.expand(second)
    return '%s, %s' % (left, right)
5704
def LIKE(self, first, second):
    # NOTE(review): dead code -- a second definition of LIKE later in
    # this class body shadows this one, so it is never bound. Its
    # '%' -> '/' replacement also looks wrong for a Mongo regex; the
    # surviving version uses re.escape + '.*'. Preserved untouched.
    #escaping regex operators?
    return {self.expand(first): ('%s' % \
        self.expand(second, 'string').replace('%','/'))}
5709
def STARTSWITH(self, first, second):
    # NOTE(review): dead code -- shadowed by the later STARTSWITH
    # definition in this class body; never bound. Preserved untouched.
    #escaping regex operators?
    return {self.expand(first): ('/^%s/' % \
        self.expand(second, 'string'))}
5714
def ENDSWITH(self, first, second):
    # NOTE(review): dead code -- shadowed by the later ENDSWITH
    # definition in this class body; never bound. The anchor also
    # looks misplaced ('%s^' -- end-of-string would be '%s$'), but
    # that is moot since this version is never used.
    #escaping regex operators?
    return {self.expand(first): ('/%s^/' % \
        self.expand(second, 'string'))}
5719
def CONTAINS(self, first, second, case_sensitive=False):
    # NOTE(review): dead code -- shadowed by the later CONTAINS
    # definition in this class body; never bound. Preserved untouched.
    # case_sensitive is silently ignored: mongodb regex matching here
    # is only case sensitive, which yields the same result anyway.
    return {self.expand(first) : ('/%s/' % \
        self.expand(second, 'string'))}
5726
def LIKE(self, first, second):
    """Translate the SQL LIKE operator into a MongoDB $regex query.

    The '%' wildcard maps to the regex '.*'; all other characters of
    the pattern are escaped and matched literally.
    """
    import re
    pattern = re.escape(self.expand(second, 'string')).replace('%', '.*')
    return {self.expand(first): {'$regex': pattern}}
5732 5733 #TODO verify full compatibilty with official SQL Like operator
def STARTSWITH(self, first, second):
    """Prefix match: anchored, escaped $regex ('^<literal prefix>')."""
    import re
    escaped_prefix = re.escape(self.expand(second, 'string'))
    return {self.expand(first): {'$regex': '^' + escaped_prefix}}
5740 5741 #TODO verify full compatibilty with official SQL Like operator
def ENDSWITH(self, first, second):
    """Suffix match: escaped $regex anchored at end ('<literal>$').

    TODO (inherited): a mid-word hit also matches, e.g. endswith('a')
    matches 'zsa_corbitt' in some server configurations -- verify.
    """
    import re
    escaped_suffix = re.escape(self.expand(second, 'string'))
    return {self.expand(first): {'$regex': escaped_suffix + '$'}}
5750 5751 #TODO verify full compatibilty with official oracle contains operator
def CONTAINS(self, first, second, case_sensitive=False):
    """Substring match via $regex ('.*<literal>.*').

    Fixes: the regex operator key was ' $regex' (leading space),
    which pymongo would not recognize as the $regex operator; also
    imports re locally like the sibling operators instead of relying
    on a module-level import. case_sensitive is accepted but ignored
    (matching here is always case sensitive, same net result).
    TODO (inherited): verify compatibility with Oracle's CONTAINS.
    """
    import re
    pattern = ".*" + re.escape(self.expand(second, 'string')) + ".*"
    return {self.expand(first): {'$regex': pattern}}
5759
5760 5761 -class IMAPAdapter(NoSQLAdapter):
5762 drivers = ('imaplib',) 5763 5764 """ IMAP server adapter 5765 5766 This class is intended as an interface with 5767 email IMAP servers to perform simple queries in the 5768 web2py DAL query syntax, so email read, search and 5769 other related IMAP mail services (as those implemented 5770 by brands like Google(r), and Yahoo!(r) 5771 can be managed from web2py applications. 5772 5773 The code uses examples by Yuji Tomita on this post: 5774 http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137 5775 and is based in docs for Python imaplib, python email 5776 and email IETF's (i.e. RFC2060 and RFC3501) 5777 5778 This adapter was tested with a small set of operations with Gmail(r). Other 5779 services requests could raise command syntax and response data issues. 5780 5781 It creates its table and field names "statically", 5782 meaning that the developer should leave the table and field 5783 definitions to the DAL instance by calling the adapter's 5784 .define_tables() method. The tables are defined with the 5785 IMAP server mailbox list information. 
5786 5787 .define_tables() returns a dictionary mapping dal tablenames 5788 to the server mailbox names with the following structure: 5789 5790 {<tablename>: str <server mailbox name>} 5791 5792 Here is a list of supported fields: 5793 5794 Field Type Description 5795 ################################################################ 5796 uid string 5797 answered boolean Flag 5798 created date 5799 content list:string A list of text or html parts 5800 to string 5801 cc string 5802 bcc string 5803 size integer the amount of octets of the message* 5804 deleted boolean Flag 5805 draft boolean Flag 5806 flagged boolean Flag 5807 sender string 5808 recent boolean Flag 5809 seen boolean Flag 5810 subject string 5811 mime string The mime header declaration 5812 email string The complete RFC822 message** 5813 attachments <type list> Each non text part as dict 5814 encoding string The main detected encoding 5815 5816 *At the application side it is measured as the length of the RFC822 5817 message string 5818 5819 WARNING: As row id's are mapped to email sequence numbers, 5820 make sure your imap client web2py app does not delete messages 5821 during select or update actions, to prevent 5822 updating or deleting different messages. 5823 Sequence numbers change whenever the mailbox is updated. 5824 To avoid this sequence numbers issues, it is recommended the use 5825 of uid fields in query references (although the update and delete 5826 in separate actions rule still applies). 
5827 5828 # This is the code recommended to start imap support 5829 # at the app's model: 5830 5831 imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl 5832 imapdb.define_tables() 5833 5834 Here is an (incomplete) list of possible imap commands: 5835 5836 # Count today's unseen messages 5837 # smaller than 6000 octets from the 5838 # inbox mailbox 5839 5840 q = imapdb.INBOX.seen == False 5841 q &= imapdb.INBOX.created == datetime.date.today() 5842 q &= imapdb.INBOX.size < 6000 5843 unread = imapdb(q).count() 5844 5845 # Fetch last query messages 5846 rows = imapdb(q).select() 5847 5848 # it is also possible to filter query select results with limitby and 5849 # sequences of mailbox fields 5850 5851 set.select(<fields sequence>, limitby=(<int>, <int>)) 5852 5853 # Mark last query messages as seen 5854 messages = [row.uid for row in rows] 5855 seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True) 5856 5857 # Delete messages in the imap database that have mails from mr. 
Gumby 5858 5859 deleted = 0 5860 for mailbox in imapdb.tables 5861 deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete() 5862 5863 # It is possible also to mark messages for deletion instead of ereasing them 5864 # directly with set.update(deleted=True) 5865 5866 5867 # This object give access 5868 # to the adapter auto mailbox 5869 # mapped names (which native 5870 # mailbox has what table name) 5871 5872 imapdb.mailboxes <dict> # tablename, server native name pairs 5873 5874 # To retrieve a table native mailbox name use: 5875 imapdb.<table>.mailbox 5876 5877 ### New features v2.4.1: 5878 5879 # Declare mailboxes statically with tablename, name pairs 5880 # This avoids the extra server names retrieval 5881 5882 imapdb.define_tables({"inbox": "INBOX"}) 5883 5884 # Selects without content/attachments/email columns will only 5885 # fetch header and flags 5886 5887 imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject) 5888 """ 5889 5890 types = { 5891 'string': str, 5892 'text': str, 5893 'date': datetime.date, 5894 'datetime': datetime.datetime, 5895 'id': long, 5896 'boolean': bool, 5897 'integer': int, 5898 'bigint': long, 5899 'blob': str, 5900 'list:string': str, 5901 } 5902 5903 dbengine = 'imap' 5904 5905 REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$') 5906
def __init__(self,
             db,
             uri,
             pool_size=0,
             folder=None,
             db_codec ='UTF-8',
             credential_decoder=IDENTITY,
             driver_args={},
             adapter_args={},
             do_connect=True,
             after_connection=None):
    """Set up the IMAP adapter and (optionally) connect.

    uri format: imap://user:password@host:port -- port 993 selects
    IMAP4_SSL, anything else plain IMAP4.
    NOTE(review): driver_args is a mutable default argument and is
    mutated below via driver_args.update(...), so state leaks across
    instances that rely on the default -- confirm this is intended.
    NOTE(review): REGEX_URI makes the port optional, but
    int(m.group('port')) raises TypeError when it is omitted, so a
    port is effectively mandatory.
    """
    # db uri: user@example.com:password@imap.server.com:123
    # TODO: max size adapter argument for preventing large mail transfers
    self.db = db
    self.uri = uri
    if do_connect: self.find_driver(adapter_args)
    self.pool_size=pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.credential_decoder = credential_decoder
    self.driver_args = driver_args
    self.adapter_args = adapter_args
    self.mailbox_size = None
    self.static_names = None
    self.charset = sys.getfilesystemencoding()
    # imap class (IMAP4 or IMAP4_SSL); chosen lazily inside connector()
    self.imap4 = None
    uri = uri.split("://")[1]

    """ MESSAGE is an identifier for sequence number"""

    # flags the server understands, and the DAL-field -> IMAP search
    # key mapping (None means the field is not searchable server-side)
    self.flags = ['\\Deleted', '\\Draft', '\\Flagged',
                  '\\Recent', '\\Seen', '\\Answered']
    self.search_fields = {
        'id': 'MESSAGE', 'created': 'DATE',
        'uid': 'UID', 'sender': 'FROM',
        'to': 'TO', 'cc': 'CC',
        'bcc': 'BCC', 'content': 'TEXT',
        'size': 'SIZE', 'deleted': '\\Deleted',
        'draft': '\\Draft', 'flagged': '\\Flagged',
        'recent': '\\Recent', 'seen': '\\Seen',
        'subject': 'SUBJECT', 'answered': '\\Answered',
        'mime': None, 'email': None,
        'attachments': None
        }

    db['_lastsql'] = ''

    m = self.REGEX_URI.match(uri)
    user = m.group('user')
    password = m.group('password')
    host = m.group('host')
    port = int(m.group('port'))
    over_ssl = False
    if port==993:
        over_ssl = True

    driver_args.update(host=host,port=port, password=password, user=user)

    def connector(driver_args=driver_args):
        # authentication is assumed to always succeed
        # TODO: support direct connection and login tests
        if over_ssl:
            self.imap4 = self.driver.IMAP4_SSL
        else:
            self.imap4 = self.driver.IMAP4
        connection = self.imap4(driver_args["host"], driver_args["port"])
        data = connection.login(driver_args["user"], driver_args["password"])

        # static mailbox list (filled lazily by get_mailboxes)
        connection.mailbox_names = None

        # dummy cursor function: IMAP has no cursors, but the pooling
        # machinery expects connection.cursor() to exist
        connection.cursor = lambda : True

        return connection

    self.db.define_tables = self.define_tables
    self.connector = connector
    if do_connect: self.reconnect()
5989
def reconnect(self, f=None, cursor=True):
    """
    IMAP4 pool connection method.

    The IMAP connection lacks a real cursor command; a custom dummy
    cursor (installed by the connector) stands in for it, and the
    pool uses connection.list() as a liveness probe to prevent
    uncaught use of a remotely closed session.
    No-op when a connection already exists; *f* defaults to
    self.connector.
    """
    if getattr(self,'connection',None) != None:
        return
    if f is None:
        f = self.connector

    if not self.pool_size:
        # pooling disabled: connect directly
        self.connection = f()
        self.cursor = cursor and self.connection.cursor()
    else:
        POOLS = ConnectionPool.POOLS
        uri = self.uri
        while True:
            GLOBAL_LOCKER.acquire()
            if not uri in POOLS:
                POOLS[uri] = []
            if POOLS[uri]:
                # reuse a pooled connection, then verify it is alive
                self.connection = POOLS[uri].pop()
                GLOBAL_LOCKER.release()
                self.cursor = cursor and self.connection.cursor()
                if self.cursor and self.check_active_connection:
                    try:
                        # check if the connection is alive
                        result, data = self.connection.list()
                    except:
                        # NOTE(review): bare except -- presumably a
                        # connection reset; TODO: catch the specific
                        # exception class
                        self.connection = f()
                break
            else:
                # pool empty: create a fresh connection
                GLOBAL_LOCKER.release()
                self.connection = f()
                self.cursor = cursor and self.connection.cursor()
                break
    self.after_connection_hook()
6034
def get_last_message(self, tablename):
    """SELECT the mailbox mapped to *tablename* and return its highest
    message sequence number, or None when it cannot be read."""
    # fetch the tablename -> mailbox map from the server if needed
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()
    last_message = None
    try:
        result = self.connection.select(
            self.connection.mailbox_names[tablename])
        last_message = int(result[1][0])
    except (IndexError, ValueError, TypeError, KeyError):
        e = sys.exc_info()[1]
        LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e))
    return last_message
6048
def get_uid_bounds(self, tablename):
    """Return the (first, last) message uid's of the mailbox mapped to
    *tablename*, or None when the mailbox is empty.

    Fixes: 'len(uid_list) <= 0' replaced with the idiomatic emptiness
    test, and the unused binding of get_last_message()'s result is
    dropped (the call itself is kept for its side effect).
    """
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()
    # side effect: get_last_message() SELECTs the mailbox, which the
    # uid search below requires; its return value is not needed here
    self.get_last_message(tablename)
    result, data = self.connection.uid("search", None, "(ALL)")
    uid_list = data[0].strip().split()
    if not uid_list:
        return None
    return (uid_list[0], uid_list[-1])
6061
def convert_date(self, date, add=None):
    """Convert between IMAP d-Mon-Y date strings and datetime objects.

    A datetime/date input is rendered as a "%d-%b-%Y" string; a
    string input ("Weekday, DD Mon YYYY hh:mm:ss ...") is parsed into
    a datetime. *add* is a timedelta added to the result (defaults to
    zero). Returns None for unparseable or unsupported input.

    Fixes: the docstring was a dead string statement in the middle of
    the body (never attached as documentation); it is moved to the
    proper position. The isinstance branches are reordered (the two
    types are mutually exclusive, so behavior is unchanged).
    """
    if add is None:
        add = datetime.timedelta()
    months = [None, "Jan","Feb","Mar","Apr","May","Jun",
              "Jul", "Aug","Sep","Oct","Nov","Dec"]
    if isinstance(date, (datetime.datetime, datetime.date)):
        return (date + add).strftime("%d-%b-%Y")
    elif isinstance(date, basestring):
        # Prevent unexpected date response format
        try:
            dayname, datestring = date.split(",")
        except (ValueError):
            LOGGER.debug("Could not parse date text: %s" % date)
            return None
        date_list = datestring.strip().split()
        year = int(date_list[2])
        month = months.index(date_list[1])
        day = int(date_list[0])
        hms = map(int, date_list[3].split(":"))
        return datetime.datetime(year, month, day,
                                 hms[0], hms[1], hms[2]) + add
    else:
        return None
@staticmethod
def header_represent(f, r):
    """Decode a MIME-encoded (quoted-printable/base64) header value *f*
    for display; *r* (the row) is unused but required by the DAL's
    represent-callback signature."""
    from email.header import decode_header
    decoded_text, _charset = decode_header(f)[0]
    return decoded_text
6098
def encode_text(self, text, charset, errors="replace"):
    """Re-encode mail body text to UTF-8 (Python 2 semantics).

    None becomes the empty string; a str is decoded with *charset*
    (falling back to utf-8) and re-encoded as UTF-8; any other type
    is rejected.
    """
    if text is None:
        text = ""
    elif isinstance(text, str):
        source_charset = "utf-8" if charset is None else charset
        text = unicode(text, source_charset, errors)
    else:
        raise Exception("Unsupported mail text type %s" % type(text))
    return text.encode("utf-8")
6112
def get_charset(self, message):
    """Return the charset declared in *message*'s Content-Type header,
    or None when none is declared."""
    return message.get_content_charset()
6116
def get_mailboxes(self):
    """Query the IMAP server for mailbox names.

    Fills connection.mailbox_names (sanitized name -> native mailbox
    name) and returns the list of sanitized names usable as DAL table
    names. When static names were supplied to define_tables(), those
    are used verbatim and no LIST command is issued.
    """
    if self.static_names:
        # statically defined mailbox names
        self.connection.mailbox_names = self.static_names
        return self.static_names.keys()

    mailboxes_list = self.connection.list()
    self.connection.mailbox_names = dict()
    mailboxes = list()
    x = 0   # NOTE(review): counter is incremented but never read
    for item in mailboxes_list[1]:
        x = x + 1
        item = item.strip()
        # skip mailboxes the server flags as non-selectable
        if not "NOSELECT" in item.upper():
            sub_items = item.split("\"")
            sub_items = [sub_item for sub_item in sub_items \
                if len(sub_item.strip()) > 0]
            # the native mailbox name is the last quoted chunk
            mailbox = sub_items[-1]
            # sanitize into a valid table identifier: '/' and ' '
            # become '_', other non-word chars are dropped, and no
            # leading digits/underscores are allowed
            mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox)))
            mailboxes.append(mailbox_name)
            self.connection.mailbox_names[mailbox_name] = mailbox

    return mailboxes
6144
def get_query_mailbox(self, query):
    """Walk a query's left operands until a Field is found and return
    its tablename (= mailbox table); None when no field is reachable."""
    node = query
    while hasattr(node, "first"):
        node = node.first
        if isinstance(node, Field):
            return node.tablename
        if not isinstance(node, Query):
            return None
    return None
6161
def is_flag(self, flag):
    """Return True when *flag* names a field that is stored as an IMAP
    flag (seen/deleted/draft/...) rather than a searchable header.

    Idiom fix: 'if x: return True else: return False' collapsed to
    returning the boolean expression directly.
    """
    return self.search_fields.get(flag, None) in self.flags
6167
def define_tables(self, mailbox_names=None):
    """
    Auto create common IMAP fields.

    This function creates field definitions "statically", meaning
    that custom fields as in other adapters are not supported and
    definitions are handled on a service/mode basis (local syntax
    for Gmail(r), Ymail(r)).

    mailbox_names: optional dict {tablename: native mailbox name}
    that skips the server mailbox-list retrieval entirely.

    Returns a dictionary with tablename, server native mailbox name
    pairs (also stored as self.db.mailboxes).
    """
    if mailbox_names:
        # optional statically declared mailboxes
        self.static_names = mailbox_names
    else:
        self.static_names = None
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()

    names = self.connection.mailbox_names.keys()

    for name in names:
        self.db.define_table("%s" % name,
            Field("uid", "string", writable=False),
            Field("answered", "boolean"),
            Field("created", "datetime", writable=False),
            Field("content", "list:string", writable=False),
            Field("to", "string", writable=False),
            Field("cc", "string", writable=False),
            Field("bcc", "string", writable=False),
            Field("size", "integer", writable=False),
            Field("deleted", "boolean"),
            Field("draft", "boolean"),
            Field("flagged", "boolean"),
            Field("sender", "string", writable=False),
            Field("recent", "boolean", writable=False),
            Field("seen", "boolean"),
            Field("subject", "string", writable=False),
            Field("mime", "string", writable=False),
            Field("email", "string", writable=False, readable=False),
            Field("attachments", list, writable=False, readable=False),
            Field("encoding")
            )

        # Set a special .mailbox attribute for storing
        # native mailbox names
        self.db[name].mailbox = \
            self.connection.mailbox_names[name]

        # decode quoted-printable MIME headers for display
        self.db[name].to.represent = self.db[name].cc.represent = \
            self.db[name].bcc.represent = self.db[name].sender.represent = \
            self.db[name].subject.represent = self.header_represent

    # Set the db instance mailbox collections
    self.db.mailboxes = self.connection.mailbox_names
    return self.db.mailboxes
6226
def create_table(self, *args, **kwargs):
    """Intentional no-op: IMAP tables are built from the server's
    mailbox list (see define_tables); the method exists only because
    the DAL requires it."""
    return None
6231
def _select(self, query, fields, attributes):
    """Return the textual IMAP search expression for *query* -- the
    adapter's analogue of returning the SQL text without executing.
    Common filters are applied first, as select() does."""
    if use_common_filters(query):
        query = self.common_filter(query, [self.get_query_mailbox(query),])
    return str(query)
6236
def select(self, query, fields, attributes):
    """Search and fetch records from the IMAP server and return
    web2py rows.

    Flow: UID SEARCH for the query on the mailbox mapped to the
    query's table, then a per-message UID FETCH (headers-only unless
    content/size/attachments/email fields were requested), then a
    mapping of message data into the requested columns.
    Only Query instances are supported; limitby slices the
    (descending) uid list; orderby is not implemented.
    """
    # move this statement elsewhere (upper-level)
    if use_common_filters(query):
        query = self.common_filter(query, [self.get_query_mailbox(query),])

    import email
    # get records from imap server with search + fetch
    # convert results to a dictionary
    tablename = None
    fetch_results = list()

    if isinstance(query, Query):
        tablename = self.get_table(query)
        mailbox = self.connection.mailbox_names.get(tablename, None)
        if mailbox is None:
            raise ValueError("Mailbox name not found: %s" % mailbox)
        else:
            # select with readonly
            result, selected = self.connection.select(mailbox, True)
            if result != "OK":
                raise Exception("IMAP error: %s" % selected)
            self.mailbox_size = int(selected[0])
            search_query = "(%s)" % str(query).strip()
            search_result = self.connection.uid("search", None, search_query)
            # Normal IMAP response OK is assumed (change this)
            if search_result[0] == "OK":
                # For "light" remote server responses just get the first
                # ten records (change for non-experimental implementation)
                # However, light responses are not guaranteed with this
                # approach, just fewer messages.
                limitby = attributes.get('limitby', None)
                messages_set = search_result[1][0].split()
                # descending order
                messages_set.reverse()
                if limitby is not None:
                    # TODO: orderby, asc/desc, limitby from complete message set
                    messages_set = messages_set[int(limitby[0]):int(limitby[1])]

                # keep the requests small for header/flags
                if any([(field.name in ["content", "size",
                                        "attachments", "email"]) for
                       field in fields]):
                    imap_fields = "(RFC822 FLAGS)"
                else:
                    imap_fields = "(RFC822.HEADER FLAGS)"

                if len(messages_set) > 0:
                    # create fetch results object list
                    # fetch each remote message and store it in memory
                    # (change to multi-fetch command syntax for faster
                    # transactions)
                    for uid in messages_set:
                        # fetch the RFC822 message body
                        typ, data = self.connection.uid("fetch", uid, imap_fields)
                        if typ == "OK":
                            fr = {"message": int(data[0][0].split()[0]),
                                  "uid": long(uid),
                                  "email": email.message_from_string(data[0][1]),
                                  "raw_message": data[0][1]}
                            fr["multipart"] = fr["email"].is_multipart()
                            # fetch flags for the message
                            fr["flags"] = self.driver.ParseFlags(data[1])
                            fetch_results.append(fr)
                        else:
                            # error retrieving the message body
                            raise Exception("IMAP error retrieving the body: %s" % data)
            else:
                raise Exception("IMAP search error: %s" % search_result[1])
    elif isinstance(query, (Expression, basestring)):
        raise NotImplementedError()
    else:
        raise TypeError("Unexpected query type")

    imapqry_dict = {}
    imapfields_dict = {}

    # with no explicit fields (or SQLALL) every search field is returned
    if len(fields) == 1 and isinstance(fields[0], SQLALL):
        allfields = True
    elif len(fields) == 0:
        allfields = True
    else:
        allfields = False
    if allfields:
        colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
    else:
        colnames = ["%s.%s" % (tablename, field.name) for field in fields]

    for k in colnames:
        imapfields_dict[k] = k

    imapqry_list = list()
    imapqry_array = list()
    for fr in fetch_results:
        attachments = []
        content = []
        size = 0
        n = int(fr["message"])
        item_dict = dict()
        message = fr["email"]
        uid = fr["uid"]
        charset = self.get_charset(message)
        flags = fr["flags"]
        raw_message = fr["raw_message"]
        # Return messages data mapping static fields
        # and fetched results. Mapping should be made
        # outside the select function (with auxiliary
        # instance methods)

        # pending: search flags states through the email message
        # instances for correct output

        # preserve subject encoding (ASCII/quoted printable)

        if "%s.id" % tablename in colnames:
            item_dict["%s.id" % tablename] = n
        if "%s.created" % tablename in colnames:
            item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
        if "%s.uid" % tablename in colnames:
            item_dict["%s.uid" % tablename] = uid
        if "%s.sender" % tablename in colnames:
            # If there is no encoding found in the message header
            # force utf-8 replacing characters (change this to
            # module's defaults). Applies to .sender, .to, .cc and .bcc fields
            item_dict["%s.sender" % tablename] = message["From"]
        if "%s.to" % tablename in colnames:
            item_dict["%s.to" % tablename] = message["To"]
        if "%s.cc" % tablename in colnames:
            if "Cc" in message.keys():
                item_dict["%s.cc" % tablename] = message["Cc"]
            else:
                item_dict["%s.cc" % tablename] = ""
        if "%s.bcc" % tablename in colnames:
            if "Bcc" in message.keys():
                item_dict["%s.bcc" % tablename] = message["Bcc"]
            else:
                item_dict["%s.bcc" % tablename] = ""
        if "%s.deleted" % tablename in colnames:
            item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
        if "%s.draft" % tablename in colnames:
            item_dict["%s.draft" % tablename] = "\\Draft" in flags
        if "%s.flagged" % tablename in colnames:
            item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
        if "%s.recent" % tablename in colnames:
            item_dict["%s.recent" % tablename] = "\\Recent" in flags
        if "%s.seen" % tablename in colnames:
            item_dict["%s.seen" % tablename] = "\\Seen" in flags
        if "%s.subject" % tablename in colnames:
            item_dict["%s.subject" % tablename] = message["Subject"]
        if "%s.answered" % tablename in colnames:
            item_dict["%s.answered" % tablename] = "\\Answered" in flags
        if "%s.mime" % tablename in colnames:
            item_dict["%s.mime" % tablename] = message.get_content_type()
        if "%s.encoding" % tablename in colnames:
            item_dict["%s.encoding" % tablename] = charset

        # Here goes the whole RFC822 body as an email instance
        # for controller side custom processing
        # The message is stored as a raw string
        # >> email.message_from_string(raw string)
        # returns a Message object for enhanced object processing
        if "%s.email" % tablename in colnames:
            # WARNING: no encoding performed (raw message)
            item_dict["%s.email" % tablename] = raw_message

        # Size measure as suggested in a Velocity Reviews post
        # by Tim Williams: "how to get size of email attachment"
        # Note: len() and server RFC822.SIZE reports doesn't match
        # To retrieve the server size for representation would add a new
        # fetch transaction to the process
        for part in message.walk():
            maintype = part.get_content_maintype()
            if ("%s.attachments" % tablename in colnames) or \
               ("%s.content" % tablename in colnames):
                if "%s.attachments" % tablename in colnames:
                    if not ("text" in maintype):
                        payload = part.get_payload(decode=True)
                        if payload:
                            attachment = {
                                "payload": payload,
                                "filename": part.get_filename(),
                                "encoding": part.get_content_charset(),
                                "mime": part.get_content_type(),
                                "disposition": part["Content-Disposition"]}
                            attachments.append(attachment)
                if "%s.content" % tablename in colnames:
                    payload = part.get_payload(decode=True)
                    part_charset = self.get_charset(part)
                    if "text" in maintype:
                        if payload:
                            content.append(self.encode_text(payload, part_charset))
            if "%s.size" % tablename in colnames:
                if part is not None:
                    size += len(str(part))
        # NOTE(review): these three keys are set even when the columns
        # were not requested; harmless, as only colnames are emitted
        item_dict["%s.content" % tablename] = bar_encode(content)
        item_dict["%s.attachments" % tablename] = attachments
        item_dict["%s.size" % tablename] = size
        imapqry_list.append(item_dict)

    # extra object mapping for the sake of rows object
    # creation (sends an array of lists)
    for item_dict in imapqry_list:
        imapqry_array_item = list()
        for fieldname in colnames:
            imapqry_array_item.append(item_dict[fieldname])
        imapqry_array.append(imapqry_array_item)

    # parse result and return a rows object
    colnames = colnames  # NOTE(review): no-op assignment, kept verbatim
    processor = attributes.get('processor',self.parse)
    return processor(imapqry_array, fields, colnames)
6449
def _update(self, tablename, query, fields, commit=False):
    # TODO: the adapter should implement an .expand method
    """
    Build (but do not send) the IMAP STORE commands that implement an
    update: one "+FLAGS" and/or "-FLAGS" command per matching message.

    `fields` is a list of (Field, value) pairs.  Only flag fields are
    honoured; a value of None means "leave unchanged" and the read-only
    \\Recent flag is never touched.  Returns the list of command tuples
    consumed by update().
    """
    commands = []
    if use_common_filters(query):
        query = self.common_filter(query, [tablename,])
    mark, unmark = [], []
    if query:
        for item in fields:
            field, value = item[0], item[1]
            if not self.is_flag(field.name):
                continue
            flag = self.search_fields[field.name]
            # \\Recent is server-managed; None means "do not change"
            if value is None or flag == "\\Recent":
                continue
            (mark if value else unmark).append(flag)
        result, data = self.connection.select(
            self.connection.mailbox_names[tablename])
        result, data = self.connection.search(None, "(%s)" % query)
        store_list = [token.strip() for token in data[0].split()
                      if token.strip().isdigit()]
        # one STORE command per message and per direction (+/-)
        for number in store_list:
            if mark:
                commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
            if unmark:
                commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))
    return commands
6483
def update(self, tablename, query, fields):
    """
    Execute the STORE commands produced by _update() and return how
    many of them succeeded.  Raises on the first non-OK server reply.
    """
    changed = 0
    for command in self._update(tablename, query, fields):
        result, data = self.connection.store(*command)
        if result != "OK":
            raise Exception("IMAP storing error: %s" % data)
        changed += 1
    return changed
6494
6495 - def _count(self, query, distinct=None):
6496 raise NotImplementedError()
6497
def count(self, query, distinct=None):
    """
    Count messages matching `query` by issuing an IMAP SEARCH against
    the query's mailbox.  Returns 0 when there is no query or the
    query maps to no known mailbox.  `distinct` is accepted for API
    compatibility but ignored.
    """
    tablename = self.get_query_mailbox(query)
    if not query or tablename is None:
        return 0
    if use_common_filters(query):
        query = self.common_filter(query, [tablename,])
    result, data = self.connection.select(
        self.connection.mailbox_names[tablename])
    result, data = self.connection.search(None, "(%s)" % query)
    # the server answers with a whitespace-separated list of sequence numbers
    return len([token.strip() for token in data[0].split()
                if token.strip().isdigit()])
6510
def delete(self, tablename, query):
    """
    Flag every message matching `query` as \\Deleted, then EXPUNGE the
    mailbox if anything was flagged.  Returns the number of messages
    deleted; raises on the first non-OK STORE reply.
    """
    if not query:
        return 0
    if use_common_filters(query):
        query = self.common_filter(query, [tablename,])
    result, data = self.connection.select(
        self.connection.mailbox_names[tablename])
    result, data = self.connection.search(None, "(%s)" % query)
    matched = [token.strip() for token in data[0].split()
               if token.strip().isdigit()]
    deleted = 0
    for number in matched:
        result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)")
        if result != "OK":
            raise Exception("IMAP store error: %s" % data)
        deleted += 1
    if deleted > 0:
        # physically remove the flagged messages
        result, data = self.connection.expunge()
    return deleted
6529
def BELONGS(self, first, second):
    """
    Translate a .belongs() query into an IMAP sequence set.  Only the
    message sequence number and UID pseudo-fields are supported; any
    non-digit candidate value is silently dropped.
    """
    name = self.search_fields[first.name]
    if name not in ("MESSAGE", "UID"):
        raise Exception("Operation not supported")
    sequence_set = ",".join(str(val) for val in second
                            if str(val).isdigit()).strip()
    if name == "MESSAGE":
        return "%s" % sequence_set
    return "UID %s" % sequence_set
6545
def CONTAINS(self, first, second, case_sensitive=False):
    """
    Translate .contains() into an IMAP substring search.  The
    case_sensitive flag is silently ignored: IMAP SEARCH is only
    case sensitive.
    """
    name = self.search_fields[first.name]
    if name in ("FROM", "TO", "SUBJECT", "TEXT"):
        return "%s \"%s\"" % (name, self.expand(second))
    if first.name in ("cc", "bcc"):
        # cc/bcc are addressed by their own IMAP search keys
        return "%s \"%s\"" % (first.name.upper(), self.expand(second))
    if first.name == "mime":
        # search the MIME type through the Content-Type header
        return "HEADER Content-Type \"%s\"" % self.expand(second)
    raise Exception("Operation not supported")
6561
def GT(self, first, second):
    """
    Translate 'field > value' into an IMAP range/search key.
    Supported: message sequence number, UID, DATE (SINCE) and
    SIZE (LARGER).
    """
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        last_message = self.get_last_message(first.tablename)
        return "%d:%d" % (int(self.expand(second)) + 1, last_message)
    if name == "UID":
        # GT and LT may not return expected sets depending on the uid
        # format implemented by the server
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        try:
            lower_limit = int(self.expand(second)) + 1
        except (ValueError, TypeError):
            e = sys.exc_info()[1]
            raise Exception("Operation not supported (non integer UID)")
        return "UID %s:%s" % (lower_limit, threshold)
    if name == "DATE":
        # strictly-after means "since the next day" in IMAP terms
        return "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1))
    if name == "SIZE":
        return "LARGER %s" % self.expand(second)
    raise Exception("Operation not supported")
6591
def GE(self, first, second):
    """
    Translate 'field >= value' into an IMAP range/search key.
    Supported: message sequence number, UID and DATE (SINCE).
    """
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        last_message = self.get_last_message(first.tablename)
        return "%s:%s" % (self.expand(second), last_message)
    if name == "UID":
        # GT and LT may not return expected sets depending on the uid
        # format implemented by the server
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        lower_limit = self.expand(second)
        return "UID %s:%s" % (lower_limit, threshold)
    if name == "DATE":
        return "SINCE %s" % self.convert_date(second)
    raise Exception("Operation not supported")
6615
def LT(self, first, second):
    """
    Translate 'field < value' into an IMAP range/search key.
    Supported: message sequence number, UID, DATE (BEFORE) and
    SIZE (SMALLER).
    """
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        return "%s:%s" % (1, int(self.expand(second)) - 1)
    if name == "UID":
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        try:
            upper_limit = int(self.expand(second)) - 1
        except (ValueError, TypeError):
            e = sys.exc_info()[1]
            raise Exception("Operation not supported (non integer UID)")
        return "UID %s:%s" % (pedestal, upper_limit)
    if name == "DATE":
        return "BEFORE %s" % self.convert_date(second)
    if name == "SIZE":
        return "SMALLER %s" % self.expand(second)
    raise Exception("Operation not supported")
6641
def LE(self, first, second):
    """
    Translate 'field <= value' into an IMAP range/search key.
    Supported: message sequence number, UID and DATE (BEFORE the
    following day, i.e. inclusive).
    """
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        return "%s:%s" % (1, self.expand(second))
    if name == "UID":
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        upper_limit = int(self.expand(second))
        return "UID %s:%s" % (pedestal, upper_limit)
    if name == "DATE":
        # inclusive upper bound: BEFORE the next day
        return "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1))
    raise Exception("Operation not supported")
6661
def NE(self, first, second=None):
    """
    Translate 'field != value'.  'field != None' on the id field is the
    web2py idiom for "all records" and becomes id >= 1; everything else
    is NOT(EQ(...)), with an accidental double negation collapsed.
    """
    if (second is None) and isinstance(first, Field):
        # All records special table query
        if first.type == "id":
            return self.GE(first, 1)
    negated = self.NOT(self.EQ(first, second))
    return negated.replace("NOT NOT", "").strip()
6670
def EQ(self, first, second):
    """
    Translate 'field == value' into an IMAP search key: a plain
    sequence number, 'UID n', 'ON date', or a flag keyword (negated
    when the compared value is falsy).
    """
    name = self.search_fields[first.name]
    if name is None:
        raise Exception("Operation not supported")
    if name == "MESSAGE":
        # query by message sequence number
        return "%s" % self.expand(second)
    if name == "UID":
        return "UID %s" % self.expand(second)
    if name == "DATE":
        return "ON %s" % self.convert_date(second)
    if name in self.flags:
        # flag keywords drop the leading backslash, e.g. \\Seen -> SEEN
        if second:
            return "%s" % (name.upper()[1:])
        return "NOT %s" % (name.upper()[1:])
    raise Exception("Operation not supported")
6693
def AND(self, first, second):
    """Logical AND: IMAP implicitly ANDs juxtaposed search keys."""
    return "%s %s" % (self.expand(first), self.expand(second))
6697
def OR(self, first, second):
    """
    Logical OR via the IMAP 'OR key1 key2' prefix form; an accidental
    'OR OR' produced by nested ORs is collapsed.
    """
    joined = "OR %s %s" % (self.expand(first), self.expand(second))
    return "%s" % joined.replace("OR OR", "OR")
6701
def NOT(self, first):
    """Logical negation via the IMAP 'NOT key' prefix form."""
    return "NOT %s" % self.expand(first)
6705 6706 ######################################################################## 6707 # end of adapters 6708 ######################################################################## 6709 6710 ADAPTERS = { 6711 'sqlite': SQLiteAdapter, 6712 'spatialite': SpatiaLiteAdapter, 6713 'sqlite:memory': SQLiteAdapter, 6714 'spatialite:memory': SpatiaLiteAdapter, 6715 'mysql': MySQLAdapter, 6716 'postgres': PostgreSQLAdapter, 6717 'postgres:psycopg2': PostgreSQLAdapter, 6718 'postgres:pg8000': PostgreSQLAdapter, 6719 'postgres2:psycopg2': NewPostgreSQLAdapter, 6720 'postgres2:pg8000': NewPostgreSQLAdapter, 6721 'oracle': OracleAdapter, 6722 'mssql': MSSQLAdapter, 6723 'mssql2': MSSQL2Adapter, 6724 'mssql3': MSSQL3Adapter, 6725 'vertica': VerticaAdapter, 6726 'sybase': SybaseAdapter, 6727 'db2': DB2Adapter, 6728 'teradata': TeradataAdapter, 6729 'informix': InformixAdapter, 6730 'informix-se': InformixSEAdapter, 6731 'firebird': FireBirdAdapter, 6732 'firebird_embedded': FireBirdAdapter, 6733 'ingres': IngresAdapter, 6734 'ingresu': IngresUnicodeAdapter, 6735 'sapdb': SAPDBAdapter, 6736 'cubrid': CubridAdapter, 6737 'jdbc:sqlite': JDBCSQLiteAdapter, 6738 'jdbc:sqlite:memory': JDBCSQLiteAdapter, 6739 'jdbc:postgres': JDBCPostgreSQLAdapter, 6740 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility 6741 'google:datastore': GoogleDatastoreAdapter, 6742 'google:sql': GoogleSQLAdapter, 6743 'couchdb': CouchDBAdapter, 6744 'mongodb': MongoDBAdapter, 6745 'imap': IMAPAdapter 6746 }
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    makes sure the content of a field is in line with the declared
    fieldtype

    Returns a validator, or a list of validators, appropriate for the
    field's declared type; may also set field.represent as a side effect
    for reference / list fields.
    """
    db = field.db
    # without gluon.validators available there is nothing to attach
    if not have_validators:
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        # a custom type either carries its own validator or delegates
        # to the validators of its underlying base type
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            field_type = field_type.type
    elif not isinstance(field_type,str):
        # e.g. a Table used as type: nothing sensible to validate
        return []
    requires=[]
    # helper used by represent closures below: format a referenced row
    # with the referenced table's _format (string or callable), falling
    # back to the raw id when the row is missing or has no format
    def ff(r,id):
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type in (('string', 'text', 'password')):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'json':
        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))
    elif field_type == 'double' or field_type == 'float':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type in ('integer','bigint'):
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    # 'reference <table>' (no dotted field part) to a table known to db:
    # field_type[10:] strips the 'reference ' prefix
    elif db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in db.tables:
        referenced = db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(db,field)
            # self-reference: allow empty so the first record can be saved
            if field.tablename == field_type[10:]:
                return validators.IS_EMPTY_OR(requires)
            return requires
    # 'list:reference <table>': field_type[15:] strips 'list:reference '
    elif db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in db.tables:
        referenced = db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = None
            db, id = r._db, r._id
            if isinstance(db._adapter, GoogleDatastoreAdapter):
                # GAE limits belongs() to 30 items per query: batch and
                # merge the partial result sets
                def count(values): return db(id.belongs(values)).select(id)
                rx = range(0, len(ids), 30)
                refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
            else:
                refs = db(id.belongs(ids)).select(id)
            return (refs and ', '.join(str(f(r,x.id)) for x in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(db,field)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(db,field))
    # sff: two-letter prefixes of types that may legitimately be empty
    # (integer, double, date, time, datetime, decimal, boolean, bigint)
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape '|' inside a value so it survives bar-encoding (see bar_encode)."""
    text = str(item)
    return text.replace('|', '||')
6848
def bar_encode(items):
    """
    Serialize items into web2py's bar-delimited list format
    ('|a|b|c|').  Items that are empty/whitespace-only when stringified
    are skipped; '|' inside a value is escaped as '||'.
    """
    escaped = [str(item).replace('|', '||')
               for item in items if str(item).strip()]
    return '|%s|' % '|'.join(escaped)
6851
def bar_decode_integer(value):
    """
    Decode a bar-delimited string into a list of (Python 2) longs.
    Some drivers hand back a buffer object instead of a string, in
    which case it is drained with .read() first.
    """
    needs_read = not hasattr(value, 'split') and hasattr(value, 'read')
    raw = value.read() if needs_read else value
    return [long(piece) for piece in raw.split('|') if piece.strip()]
6856
def bar_decode_string(value):
    """
    Decode a bar-encoded string ('|a|b|c|') back into its items,
    unescaping '||' to '|'.  REGEX_UNPACK splits on unescaped bars.
    """
    inner = value[1:-1]  # strip the enclosing bars
    return [piece.replace('||', '|')
            for piece in REGEX_UNPACK.split(inner) if piece.strip()]
6860
class Row(object):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """

    def __init__(self,*args,**kwargs):
        # behaves like dict(): accepts a mapping/iterable and/or keywords
        self.__dict__.update(*args,**kwargs)

    def __getitem__(self, key):
        # lookup order: _extra (raw SQL expression columns) first, then
        # dotted 'table.field' access, then plain attribute access
        key=str(key)
        m = REGEX_TABLE_DOT_FIELD.match(key)
        if key in self.get('_extra',{}):
            return self._extra[key]
        elif m:
            try:
                return ogetattr(self, m.group(1))[m.group(2)]
            except (KeyError,AttributeError,TypeError):
                # fall back to the bare field name for flat rows
                key = m.group(2)
        return ogetattr(self, key)

    def __setitem__(self, key, value):
        setattr(self, str(key), value)

    __delitem__ = delattr

    # NOTE(review): this lambda is dead code -- it is shadowed by the
    # later `def __copy__` below, which is what actually runs
    __copy__ = lambda self: Row(self)

    # row('person.name') works like row['person.name']
    __call__ = __getitem__

    def get(self,key,default=None):
        # dict-style get(); also used internally by __getitem__
        return self.__dict__.get(key,default)

    def __contains__(self,key):
        return key in self.__dict__

    has_key = __contains__  # Python 2 dict compatibility

    def __nonzero__(self):
        # Python 2 truth protocol: an empty Row is falsy
        return len(self.__dict__)>0

    def update(self, *args, **kwargs):
        self.__dict__.update(*args, **kwargs)

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __iter__(self):
        return self.__dict__.__iter__()

    def iteritems(self):
        # Python 2 dict compatibility
        return self.__dict__.iteritems()

    def __str__(self):
        ### this could be made smarter
        return '<Row %s>' % self.as_dict()

    def __repr__(self):
        return '<Row %s>' % self.as_dict()

    def __int__(self):
        # read the raw 'id' attribute, bypassing __getitem__/_extra logic
        return object.__getattribute__(self,'id')

    def __long__(self):
        return long(object.__getattribute__(self,'id'))

    def __eq__(self,other):
        # rows compare equal when their serializable dicts are equal;
        # anything without an as_dict() is never equal to a Row
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self,other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self, datetime_to_str=False, custom_types=None):
        # Returns a plain dict copy of the row: nested Rows are
        # recursively converted, References become longs, Decimals
        # become floats, datetimes are optionally stringified, and any
        # value of a non-serializable type (unless whitelisted through
        # custom_types) is silently dropped.
        SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
        if isinstance(custom_types,(list,tuple,set)):
            SERIALIZABLE_TYPES += custom_types
        elif custom_types:
            SERIALIZABLE_TYPES.append(custom_types)
        d = dict(self)
        # iterate over a copy of the keys: entries may be deleted below
        for k in copy.copy(d.keys()):
            v=d[k]
            if d[k] is None:
                continue
            elif isinstance(v,Row):
                d[k]=v.as_dict()
            elif isinstance(v,Reference):
                d[k]=long(v)
            elif isinstance(v,decimal.Decimal):
                d[k]=float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    # 'YYYY-MM-DD HH:MM:SS' (seconds precision)
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,tuple(SERIALIZABLE_TYPES)):
                del d[k]
        return d

    def as_xml(self, row_name="row", colnames=None, indent='  '):
        # Serialize the row (recursively) as an XML fragment; field
        # names that are not plain identifiers are emitted as
        # <extra name="..."> elements.  colnames is currently unused.
        def f(row,field,indent='  '):
            if isinstance(row,Row):
                spc = indent+'  \n'
                items = [f(row[x],x,indent+'  ') for x in row]
                return '%s<%s>\n%s\n%s</%s>' % (
                    indent,
                    field,
                    spc.join(item for item in items if item),
                    indent,
                    field)
            elif not callable(row):
                if REGEX_ALPHANUMERIC.match(field):
                    return '%s<%s>%s</%s>' % (indent,field,row,field)
                else:
                    return '%s<extra name="%s">%s</extra>' % \
                        (indent,field,row)
            else:
                # callables (e.g. lazy virtual fields) are skipped
                return None
        return f(self, row_name, indent=indent)

    def as_json(self, mode="object", default=None, colnames=None,
                serialize=True, **kwargs):
        """
        serializes the table to a JSON list of objects
        kwargs are passed to .as_dict method
        only "object" mode supported for single row

        serialize = False used by Rows.as_json
        TODO: return array mode with query column order
        """

        # resolve one 'table.field' column against this row; returns a
        # (key, value) pair in object mode, the bare value in array mode
        def inner_loop(record, col):
            (t, f) = col.split('.')
            res = None
            if not REGEX_TABLE_DOT_FIELD.match(col):
                key = col
                res = record._extra[col]
            else:
                key = f
                if isinstance(record.get(t, None), Row):
                    res = record[t][f]
                else:
                    res = record[f]
            if mode == 'object':
                return (key, res)
            else:
                return res

        # multi: row of rows, i.e. a join result grouped by table
        multi = any([isinstance(v, self.__class__) for v in self.values()])
        mode = mode.lower()
        if not mode in ['object', 'array']:
            raise SyntaxError('Invalid JSON serialization mode: %s' % mode)

        if mode=='object' and colnames:
            item = dict([inner_loop(self, col) for col in colnames])
        elif colnames:
            item = [inner_loop(self, col) for col in colnames]
        else:
            # without colnames only object mode makes sense
            if not mode == 'object':
                raise SyntaxError('Invalid JSON serialization mode: %s' % mode)

            if multi:
                # flatten the per-table sub-rows into a single dict
                item = dict()
                [item.update(**v.as_dict(**kwargs)) for v in self.values()]
            else:
                item = self.as_dict(**kwargs)

        if serialize:
            if have_serializers:
                return serializers.json(item,
                                        default=default or
                                        serializers.custom_json)
            elif simplejson:
                return simplejson.dumps(item)
            else:
                raise RuntimeError("missing simplejson")
        else:
            return item
################################################################################
# Everything below should be independent of the specifics of the database
# and should work for RDBMs and some NoSQL databases
################################################################################

class SQLCallableList(list):

    """
    A list that is also callable: calling an instance returns a shallow
    copy of itself (so e.g. both db.tables and db.tables() work).
    """

    def __call__(self):
        # copy.copy preserves the SQLCallableList type (a slice would not)
        return copy.copy(self)
7060
def smart_query(fields,text):
    """
    Parse a free-text search expression (e.g. "name starts with J and
    age > 3") against the given fields/tables and return the
    corresponding DAL Query object.  Raises RuntimeError on invalid
    field names, syntax or unsupported operations.
    """
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    # flatten: Tables contribute all of their fields
    new_fields = []
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    fields = new_fields
    # map both 'name' and 'table.name' (lowercased) to the Field;
    # first field wins on name collisions
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # pull out quoted string constants and replace them with #N
    # placeholders so later rewrites cannot touch their contents
    constants = {}
    i = 0
    while True:
        m = REGEX_CONST_STRING.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i+=1
    text = re.sub('\s+',' ',text).lower()
    # normalize operator spellings (symbols and English phrases) into a
    # canonical token; ORDER MATTERS: longer phrases must come first so
    # e.g. '<=' is not consumed as '<'
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','='),
                (' equal ','='),
                (' equals ','='),
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' not in ' , 'notbelongs'),
                (' in ' , 'belongs'),
                (' is ','=')]:
        if a[0]==' ':
            # also accept 'is <phrase>' variants, e.g. 'is not equal to'
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    # glue split comparison operators back together: '< =' -> '<='
    text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
    # simple state machine over tokens: expect field -> operator ->
    # value, combining each completed triple into the running query
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                # placeholder: restore the original quoted constant
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
                # unquoted equality on text-ish fields means LIKE
                if field.type in ('text', 'string', 'json'):
                    if op == '=': op = 'like'
            if op == '=': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif op == '!=': new_query = field!=value
            elif op == 'belongs': new_query = field.belongs(value.split(','))
            elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
            elif field.type in ('text', 'string', 'json'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError("Invalid operation")
            elif field._db._adapter.dbengine=='google:datastore' and \
                field.type in ('list:integer', 'list:string', 'list:reference'):
                if op == 'contains': new_query = field.contains(value)
                else: raise RuntimeError("Invalid operation")
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            # reset the state machine for the next triple
            field = op = neg = logic = None
    return query
7178
7179 -class DAL(object):
7180 7181 """ 7182 an instance of this class represents a database connection 7183 7184 Example:: 7185 7186 db = DAL('sqlite://test.db') 7187 7188 or 7189 7190 db = DAL({"uri": ..., "items": ...}) # experimental 7191 7192 db.define_table('tablename', Field('fieldname1'), 7193 Field('fieldname2')) 7194 """ 7195
    def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
        # Thread-local instance bookkeeping: db_instances groups live
        # DALs by db_uid (the md5 of their uri, unless given); zombies
        # are placeholders created via uri='<zombie>' before the real
        # connection exists, to be adopted by a later real DAL() call.
        if not hasattr(THREAD_LOCAL,'db_instances'):
            THREAD_LOCAL.db_instances = {}
        if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
            THREAD_LOCAL.db_instances_zombie = {}
        if uri == '<zombie>':
            db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
            if db_uid in THREAD_LOCAL.db_instances:
                # reuse the most recent live instance for this uid
                db_group = THREAD_LOCAL.db_instances[db_uid]
                db = db_group[-1]
            elif db_uid in THREAD_LOCAL.db_instances_zombie:
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
                THREAD_LOCAL.db_instances_zombie[db_uid] = db
        else:
            db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest())
            if db_uid in THREAD_LOCAL.db_instances_zombie:
                # a zombie was waiting for this uri: promote it to live
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
                del THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
            db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
            db_group.append(db)
            THREAD_LOCAL.db_instances[db_uid] = db_group
        db._db_uid = db_uid
        return db
7223 7224 @staticmethod
7225 - def set_folder(folder):
7226 """ 7227 # ## this allows gluon to set a folder for this thread 7228 # ## <<<<<<<<< Should go away as new DAL replaces old sql.py 7229 """ 7230 BaseAdapter.set_folder(folder)
7231 7232 @staticmethod
7233 - def get_instances():
7234 """ 7235 Returns a dictionary with uri as key with timings and defined tables 7236 {'sqlite://storage.sqlite': { 7237 'dbstats': [(select auth_user.email from auth_user, 0.02009)], 7238 'dbtables': { 7239 'defined': ['auth_cas', 'auth_event', 'auth_group', 7240 'auth_membership', 'auth_permission', 'auth_user'], 7241 'lazy': '[]' 7242 } 7243 } 7244 } 7245 """ 7246 dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() 7247 infos = {} 7248 for db_uid, db_group in dbs: 7249 for db in db_group: 7250 if not db._uri: 7251 continue 7252 k = hide_password(db._uri) 7253 infos[k] = dict(dbstats = [(row[0], row[1]) for row in db._timings], 7254 dbtables = {'defined': 7255 sorted(list(set(db.tables) - 7256 set(db._LAZY_TABLES.keys()))), 7257 'lazy': sorted(db._LAZY_TABLES.keys())} 7258 ) 7259 return infos
7260 7261 @staticmethod
7262 - def distributed_transaction_begin(*instances):
7263 if not instances: 7264 return 7265 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7266 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7267 instances = enumerate(instances) 7268 for (i, db) in instances: 7269 if not db._adapter.support_distributed_transaction(): 7270 raise SyntaxError( 7271 'distributed transaction not suported by %s' % db._dbname) 7272 for (i, db) in instances: 7273 db._adapter.distributed_transaction_begin(keys[i])
7274 7275 @staticmethod
7276 - def distributed_transaction_commit(*instances):
7277 if not instances: 7278 return 7279 instances = enumerate(instances) 7280 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7281 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7282 for (i, db) in instances: 7283 if not db._adapter.support_distributed_transaction(): 7284 raise SyntaxError( 7285 'distributed transaction not suported by %s' % db._dbanme) 7286 try: 7287 for (i, db) in instances: 7288 db._adapter.prepare(keys[i]) 7289 except: 7290 for (i, db) in instances: 7291 db._adapter.rollback_prepared(keys[i]) 7292 raise RuntimeError('failure to commit distributed transaction') 7293 else: 7294 for (i, db) in instances: 7295 db._adapter.commit_prepared(keys[i]) 7296 return
7297
7298 - def __init__(self, uri=DEFAULT_URI, 7299 pool_size=0, folder=None, 7300 db_codec='UTF-8', check_reserved=None, 7301 migrate=True, fake_migrate=False, 7302 migrate_enabled=True, fake_migrate_all=False, 7303 decode_credentials=False, driver_args=None, 7304 adapter_args=None, attempts=5, auto_import=False, 7305 bigint_id=False,debug=False,lazy_tables=False, 7306 db_uid=None, do_connect=True, after_connection=None):
7307 """ 7308 Creates a new Database Abstraction Layer instance. 7309 7310 Keyword arguments: 7311 7312 :uri: string that contains information for connecting to a database. 7313 (default: 'sqlite://dummy.db') 7314 7315 experimental: you can specify a dictionary as uri 7316 parameter i.e. with 7317 db = DAL({"uri": "sqlite://storage.sqlite", 7318 "items": {...}, ...}) 7319 7320 for an example of dict input you can check the output 7321 of the scaffolding db model with 7322 7323 db.as_dict() 7324 7325 Note that for compatibility with Python older than 7326 version 2.6.5 you should cast your dict input keys 7327 to str due to a syntax limitation on kwarg names. 7328 for proper DAL dictionary input you can use one of: 7329 7330 obj = serializers.cast_keys(dict, [encoding="utf-8"]) 7331 7332 or else (for parsing json input) 7333 7334 obj = serializers.loads_json(data, unicode_keys=False) 7335 7336 :pool_size: How many open connections to make to the database object. 7337 :folder: where .table files will be created. 7338 automatically set within web2py 7339 use an explicit path when using DAL outside web2py 7340 :db_codec: string encoding of the database (default: 'UTF-8') 7341 :check_reserved: list of adapters to check tablenames and column names 7342 against sql/nosql reserved keywords. (Default None) 7343 7344 * 'common' List of sql keywords that are common to all database types 7345 such as "SELECT, INSERT". (recommended) 7346 * 'all' Checks against all known SQL keywords. (not recommended) 7347 <adaptername> Checks against the specific adapters list of keywords 7348 (recommended) 7349 * '<adaptername>_nonreserved' Checks against the specific adapters 7350 list of nonreserved keywords. (if available) 7351 :migrate (defaults to True) sets default migrate behavior for all tables 7352 :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables 7353 :migrate_enabled (defaults to True). 
If set to False disables ALL migrations 7354 :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables 7355 :attempts (defaults to 5). Number of times to attempt connecting 7356 :auto_import (defaults to False). If set, import automatically table definitions from the 7357 databases folder 7358 :bigint_id (defaults to False): If set, turn on bigint instead of int for id fields 7359 :lazy_tables (defaults to False): delay table definition until table access 7360 :after_connection (defaults to None): a callable that will be execute after the connection 7361 """ 7362 7363 items = None 7364 if isinstance(uri, dict): 7365 if "items" in uri: 7366 items = uri.pop("items") 7367 try: 7368 newuri = uri.pop("uri") 7369 except KeyError: 7370 newuri = DEFAULT_URI 7371 locals().update(uri) 7372 uri = newuri 7373 7374 if uri == '<zombie>' and db_uid is not None: return 7375 if not decode_credentials: 7376 credential_decoder = lambda cred: cred 7377 else: 7378 credential_decoder = lambda cred: urllib.unquote(cred) 7379 self._folder = folder 7380 if folder: 7381 self.set_folder(folder) 7382 self._uri = uri 7383 self._pool_size = pool_size 7384 self._db_codec = db_codec 7385 self._lastsql = '' 7386 self._timings = [] 7387 self._pending_references = {} 7388 self._request_tenant = 'request_tenant' 7389 self._common_fields = [] 7390 self._referee_name = '%(table)s' 7391 self._bigint_id = bigint_id 7392 self._debug = debug 7393 self._migrated = [] 7394 self._LAZY_TABLES = {} 7395 self._lazy_tables = lazy_tables 7396 self._tables = SQLCallableList() 7397 self._driver_args = driver_args 7398 self._adapter_args = adapter_args 7399 self._check_reserved = check_reserved 7400 self._decode_credentials = decode_credentials 7401 self._attempts = attempts 7402 self._do_connect = do_connect 7403 7404 if not str(attempts).isdigit() or attempts < 0: 7405 attempts = 5 7406 if uri: 7407 uris = isinstance(uri,(list,tuple)) and uri or [uri] 7408 error = '' 7409 connected = False 
7410 for k in range(attempts): 7411 for uri in uris: 7412 try: 7413 if is_jdbc and not uri.startswith('jdbc:'): 7414 uri = 'jdbc:'+uri 7415 self._dbname = REGEX_DBNAME.match(uri).group() 7416 if not self._dbname in ADAPTERS: 7417 raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname) 7418 # notice that driver args or {} else driver_args 7419 # defaults to {} global, not correct 7420 kwargs = dict(db=self,uri=uri, 7421 pool_size=pool_size, 7422 folder=folder, 7423 db_codec=db_codec, 7424 credential_decoder=credential_decoder, 7425 driver_args=driver_args or {}, 7426 adapter_args=adapter_args or {}, 7427 do_connect=do_connect, 7428 after_connection=after_connection) 7429 self._adapter = ADAPTERS[self._dbname](**kwargs) 7430 types = ADAPTERS[self._dbname].types 7431 # copy so multiple DAL() possible 7432 self._adapter.types = copy.copy(types) 7433 if bigint_id: 7434 if 'big-id' in types and 'reference' in types: 7435 self._adapter.types['id'] = types['big-id'] 7436 self._adapter.types['reference'] = types['big-reference'] 7437 connected = True 7438 break 7439 except SyntaxError: 7440 raise 7441 except Exception: 7442 tb = traceback.format_exc() 7443 sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb)) 7444 if connected: 7445 break 7446 else: 7447 time.sleep(1) 7448 if not connected: 7449 raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb)) 7450 else: 7451 self._adapter = BaseAdapter(db=self,pool_size=0, 7452 uri='None',folder=folder, 7453 db_codec=db_codec, after_connection=after_connection) 7454 migrate = fake_migrate = False 7455 adapter = self._adapter 7456 self._uri_hash = hashlib_md5(adapter.uri).hexdigest() 7457 self.check_reserved = check_reserved 7458 if self.check_reserved: 7459 from reserved_sql_keywords import ADAPTERS as RSK 7460 self.RSK = RSK 7461 self._migrate = migrate 7462 self._fake_migrate = fake_migrate 7463 self._migrate_enabled = migrate_enabled 7464 
self._fake_migrate_all = fake_migrate_all 7465 if auto_import or items: 7466 self.import_table_definitions(adapter.folder, 7467 items=items)
    @property
    def tables(self):
        """Read-only list of table names defined on this DAL
        (a SQLCallableList, so it can also be called)."""
        return self._tables
    def import_table_definitions(self, path, migrate=False,
                                 fake_migrate=False, items=None):
        """
        Recreate table definitions either from a ``items`` dictionary
        (as produced by DAL.as_dict) or, when ``items`` is None, from the
        pickled ``<uri_hash>_*.table`` migration files found under ``path``.
        """
        pattern = pjoin(path,self._uri_hash+'_*.table')
        if items:
            # dictionary-driven import (experimental dict/json DAL input)
            for tablename, table in items.iteritems():
                # TODO: read all field/table options
                fields = []
                # remove unsupported/illegal Table arguments
                [table.pop(name) for name in ("name", "fields") if
                 name in table]
                if "items" in table:
                    for fieldname, field in table.pop("items").iteritems():
                        # remove unsupported/illegal Field arguments
                        [field.pop(key) for key in ("requires", "name",
                         "compute", "colname") if key in field]
                        fields.append(Field(str(fieldname), **field))
                self.define_table(str(tablename), *fields, **table)
        else:
            # filesystem-driven import from the pickled .table files
            for filename in glob.glob(pattern):
                tfile = self._adapter.file_open(filename, 'r')
                try:
                    sql_fields = pickle.load(tfile)
                    # slice the table name out of '<hash>_<name>.table'
                    name = filename[len(pattern)-7:-6]
                    # (sortable, Field) pairs so fields are re-declared in
                    # their original declaration order
                    mf = [(value['sortable'],
                           Field(key,
                                 type=value['type'],
                                 length=value.get('length',None),
                                 notnull=value.get('notnull',False),
                                 unique=value.get('unique',False))) \
                              for key, value in sql_fields.iteritems()]
                    mf.sort(lambda a,b: cmp(a[0],b[0]))
                    self.define_table(name,*[item[1] for item in mf],
                                      **dict(migrate=migrate,
                                             fake_migrate=fake_migrate))
                finally:
                    self._adapter.file_close(tfile)
7510 - def check_reserved_keyword(self, name):
7511 """ 7512 Validates ``name`` against SQL keywords 7513 Uses self.check_reserve which is a list of 7514 operators to use. 7515 self.check_reserved 7516 ['common', 'postgres', 'mysql'] 7517 self.check_reserved 7518 ['all'] 7519 """ 7520 for backend in self.check_reserved: 7521 if name.upper() in self.RSK[backend]: 7522 raise SyntaxError( 7523 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
7524
    def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
        """
        Match a RESTful URL (args/vars) against a list of URL patterns and
        translate it into a database query, returning a Row with keys
        status / response / error / pattern.

        EXAMPLE:

        db.define_table('person',Field('name'),Field('info'))
        db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info'))

        @request.restful()
        def index():
            def GET(*args,**vars):
                patterns = [
                    "/friends[person]",
                    "/{person.name}/:field",
                    "/{person.name}/pets[pet.ownedby]",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
                    ("/dogs[pet]", db.pet.info=='dog'),
                    ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
                    ]
                parser = db.parse_as_rest(patterns,args,vars)
                if parser.status == 200:
                    return dict(content=parser.response)
                else:
                    raise HTTP(parser.status,parser.error)

            def POST(table_name,**vars):
                if table_name == 'person':
                    return db.person.validate_and_insert(**vars)
                elif table_name == 'pet':
                    return db.pet.validate_and_insert(**vars)
                else:
                    raise HTTP(400)
            return locals()
        """

        db = self
        re1 = REGEX_SEARCH_PATTERN    # matches "{table.field[.op[.not]]}" tags
        re2 = REGEX_SQUARE_BRACKETS   # matches "name[table.field]" tags

        def auto_table(table,base='',depth=0):
            # Auto-generate URL patterns for every readable field of `table`,
            # recursing into referencing tables up to `depth` levels.
            patterns = []
            for field in db[table].fields:
                if base:
                    tag = '%s/%s' % (base,field.replace('_','-'))
                else:
                    tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
                f = db[table][field]
                if not f.readable: continue
                if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('boolean'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('float','double','integer','bigint'):
                    # numeric fields expose a half-open [ge, lt) range match
                    tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('list:'):
                    tag += '/{%s.%s.contains}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('date','datetime'):
                    # dates are matched by progressively finer components
                    tag+= '/{%s.%s.year}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.month}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.day}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if f.type in ('datetime','time'):
                    tag+= '/{%s.%s.hour}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.minute}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.second}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
            if depth>0:
                # follow incoming references one level down
                for f in db[table]._referenced_by:
                    tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
                    patterns.append(tag)
                    patterns += auto_table(table,base=tag,depth=depth-1)
            return patterns

        if patterns == 'auto':
            # build patterns for every non-auth table
            patterns=[]
            for table in db.tables:
                if not table.startswith('auth_'):
                    patterns.append('/%s[%s]' % (table,table))
                    patterns += auto_table(table,base='',depth=1)
        else:
            # expand any ':auto[...]' placeholder in-place
            i = 0
            while i<len(patterns):
                pattern = patterns[i]
                if not isinstance(pattern,str):
                    pattern = pattern[0]
                tokens = pattern.split('/')
                if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
                    new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
                                              '/'.join(tokens[:-1]))
                    patterns = patterns[:i]+new_patterns+patterns[i+1:]
                    i += len(new_patterns)
                else:
                    i += 1
        if '/'.join(args) == 'patterns':
            # introspection endpoint: return the expanded pattern list
            return Row({'status':200,'pattern':'list',
                        'error':None,'response':patterns})
        for pattern in patterns:
            basequery, exposedfields = None, []
            # a pattern may be (pattern, basequery[, exposedfields])
            if isinstance(pattern,tuple):
                if len(pattern)==2:
                    pattern, basequery = pattern
                elif len(pattern)>2:
                    pattern, basequery, exposedfields = pattern[0:3]
            otable=table=None
            if not isinstance(queries,dict):
                dbset=db(queries)
                if basequery is not None:
                    dbset = dbset(basequery)
            i=0
            tags = pattern[1:].split('/')
            # a pattern only matches a request of the same length
            if len(tags)!=len(args):
                continue
            for tag in tags:
                if re1.match(tag):
                    # '{table.field.op}' tag: add a filter on dbset
                    tokens = tag[1:-1].split('.')
                    table, field = tokens[0], tokens[1]
                    if not otable or table == otable:
                        if len(tokens)==2 or tokens[2]=='eq':
                            query = db[table][field]==args[i]
                        elif tokens[2]=='ne':
                            query = db[table][field]!=args[i]
                        elif tokens[2]=='lt':
                            query = db[table][field]<args[i]
                        elif tokens[2]=='gt':
                            query = db[table][field]>args[i]
                        elif tokens[2]=='ge':
                            query = db[table][field]>=args[i]
                        elif tokens[2]=='le':
                            query = db[table][field]<=args[i]
                        elif tokens[2]=='year':
                            query = db[table][field].year()==args[i]
                        elif tokens[2]=='month':
                            query = db[table][field].month()==args[i]
                        elif tokens[2]=='day':
                            query = db[table][field].day()==args[i]
                        elif tokens[2]=='hour':
                            query = db[table][field].hour()==args[i]
                        elif tokens[2]=='minute':
                            query = db[table][field].minutes()==args[i]
                        elif tokens[2]=='second':
                            query = db[table][field].seconds()==args[i]
                        elif tokens[2]=='startswith':
                            query = db[table][field].startswith(args[i])
                        elif tokens[2]=='contains':
                            query = db[table][field].contains(args[i])
                        else:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        # a 4th token can only be 'not', negating the filter
                        if len(tokens)==4 and tokens[3]=='not':
                            query = ~query
                        elif len(tokens)>=4:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                            if basequery is not None:
                                dbset = dbset(basequery)
                        dbset=dbset(query)
                    else:
                        raise RuntimeError("missing relation in pattern: %s" % pattern)
                elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
                    # 'name[table.field]' tag: hop across a reference
                    ref = tag[tag.find('[')+1:-1]
                    if '.' in ref and otable:
                        table,field = ref.split('.')
                        selfld = '_id'
                        if db[table][field].type.startswith('reference '):
                            refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
                        else:
                            refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
                        if refs:
                            selfld = refs[0]
                        if nested_select:
                            # express the join as a nested sub-select
                            try:
                                dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
                            except ValueError:
                                return Row({'status':400,'pattern':pattern,
                                            'error':'invalid path','response':None})
                        else:
                            # materialize the id list client-side instead
                            items = [item.id for item in dbset.select(db[otable][selfld])]
                            dbset=db(db[table][field].belongs(items))
                    else:
                        table = ref
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                        dbset=dbset(db[table])
                elif tag==':field' and table:
                    # ':field' tag: project a single named field of the set
                    field = args[i]
                    if not field in db[table]: break
                    # hand-built patterns should respect .readable=False as well
                    if not db[table][field].readable:
                        return Row({'status':418,'pattern':pattern,
                                    'error':'I\'m a teapot','response':None})
                    try:
                        distinct = vars.get('distinct', False) == 'True'
                        offset = long(vars.get('offset',None) or 0)
                        limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                    except ValueError:
                        return Row({'status':400,'error':'invalid limits','response':None})
                    items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
                    if items:
                        return Row({'status':200,'response':items,
                                    'pattern':pattern})
                    else:
                        return Row({'status':404,'pattern':pattern,
                                    'error':'no record found','response':None})
                elif tag != args[i]:
                    # literal path segment mismatch: try next pattern
                    break
                otable = table
                i += 1
            # whole pattern consumed: select and return the record set
            if i==len(tags) and table:
                ofields = vars.get('order',db[table]._id.name).split('|')
                try:
                    orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
                except (KeyError, AttributeError):
                    return Row({'status':400,'error':'invalid orderby','response':None})
                if exposedfields:
                    fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
                else:
                    fields = [field for field in db[table] if field.readable]
                count = dbset.count()
                try:
                    offset = long(vars.get('offset',None) or 0)
                    limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                except ValueError:
                    return Row({'status':400,'error':'invalid limits','response':None})
                if count > limits[1]-limits[0]:
                    return Row({'status':400,'error':'too many records','response':None})
                try:
                    response = dbset.select(limitby=limits,orderby=orderby,*fields)
                except ValueError:
                    return Row({'status':400,'pattern':pattern,
                                'error':'invalid path','response':None})
                return Row({'status':200,'response':response,
                            'pattern':pattern,'count':count})
        return Row({'status':400,'error':'no matching pattern','response':None})
7779 - def define_table( 7780 self, 7781 tablename, 7782 *fields, 7783 **args 7784 ):
7785 if not isinstance(tablename,str): 7786 raise SyntaxError("missing table name") 7787 elif hasattr(self,tablename) or tablename in self.tables: 7788 if not args.get('redefine',False): 7789 raise SyntaxError('table already defined: %s' % tablename) 7790 elif tablename.startswith('_') or hasattr(self,tablename) or \ 7791 REGEX_PYTHON_KEYWORDS.match(tablename): 7792 raise SyntaxError('invalid table name: %s' % tablename) 7793 elif self.check_reserved: 7794 self.check_reserved_keyword(tablename) 7795 else: 7796 invalid_args = set(args)-TABLE_ARGS 7797 if invalid_args: 7798 raise SyntaxError('invalid table "%s" attributes: %s' \ 7799 % (tablename,invalid_args)) 7800 if self._lazy_tables and not tablename in self._LAZY_TABLES: 7801 self._LAZY_TABLES[tablename] = (tablename,fields,args) 7802 table = None 7803 else: 7804 table = self.lazy_define_table(tablename,*fields,**args) 7805 if not tablename in self.tables: 7806 self.tables.append(tablename) 7807 return table
7808
7809 - def lazy_define_table( 7810 self, 7811 tablename, 7812 *fields, 7813 **args 7814 ):
7815 args_get = args.get 7816 common_fields = self._common_fields 7817 if common_fields: 7818 fields = list(fields) + list(common_fields) 7819 7820 table_class = args_get('table_class',Table) 7821 table = table_class(self, tablename, *fields, **args) 7822 table._actual = True 7823 self[tablename] = table 7824 # must follow above line to handle self references 7825 table._create_references() 7826 for field in table: 7827 if field.requires == DEFAULT: 7828 field.requires = sqlhtml_validators(field) 7829 7830 migrate = self._migrate_enabled and args_get('migrate',self._migrate) 7831 if migrate and not self._uri in (None,'None') \ 7832 or self._adapter.dbengine=='google:datastore': 7833 fake_migrate = self._fake_migrate_all or \ 7834 args_get('fake_migrate',self._fake_migrate) 7835 polymodel = args_get('polymodel',None) 7836 try: 7837 GLOBAL_LOCKER.acquire() 7838 self._lastsql = self._adapter.create_table( 7839 table,migrate=migrate, 7840 fake_migrate=fake_migrate, 7841 polymodel=polymodel) 7842 finally: 7843 GLOBAL_LOCKER.release() 7844 else: 7845 table._dbt = None 7846 on_define = args_get('on_define',None) 7847 if on_define: on_define(table) 7848 return table
7849
7850 - def as_dict(self, flat=False, sanitize=True, field_options=True):
7851 dbname = db_uid = uri = None 7852 if not sanitize: 7853 uri, dbname, db_uid = (self._uri, self._dbname, self._db_uid) 7854 db_as_dict = dict(items={}, tables=[], uri=uri, dbname=dbname, 7855 db_uid=db_uid, 7856 **dict([(k, getattr(self, "_" + k)) for 7857 k in 'pool_size','folder','db_codec', 7858 'check_reserved','migrate','fake_migrate', 7859 'migrate_enabled','fake_migrate_all', 7860 'decode_credentials','driver_args', 7861 'adapter_args', 'attempts', 7862 'bigint_id','debug','lazy_tables', 7863 'do_connect'])) 7864 7865 for table in self: 7866 tablename = str(table) 7867 db_as_dict["tables"].append(tablename) 7868 db_as_dict["items"][tablename] = table.as_dict(flat=flat, 7869 sanitize=sanitize, 7870 field_options=field_options) 7871 return db_as_dict
7872
7873 - def as_xml(self, sanitize=True, field_options=True):
7874 if not have_serializers: 7875 raise ImportError("No xml serializers available") 7876 d = self.as_dict(flat=True, sanitize=sanitize, 7877 field_options=field_options) 7878 return serializers.xml(d)
7879
7880 - def as_json(self, sanitize=True, field_options=True):
7881 if not have_serializers: 7882 raise ImportError("No json serializers available") 7883 d = self.as_dict(flat=True, sanitize=sanitize, 7884 field_options=field_options) 7885 return serializers.json(d)
7886
7887 - def as_yaml(self, sanitize=True, field_options=True):
7888 if not have_serializers: 7889 raise ImportError("No YAML serializers available") 7890 d = self.as_dict(flat=True, sanitize=sanitize, 7891 field_options=field_options) 7892 return serializers.yaml(d)
7893
7894 - def __contains__(self, tablename):
7895 try: 7896 return tablename in self.tables 7897 except AttributeError: 7898 # The instance has no .tables attribute yet 7899 return False
7900 7901 has_key = __contains__ 7902
7903 - def get(self,key,default=None):
7904 return self.__dict__.get(key,default)
7905
    def __iter__(self):
        """Iterate over the Table objects (not names) defined on this DAL."""
        for tablename in self.tables:
            yield self[tablename]
    def __getitem__(self, key):
        """db['tablename'] is equivalent to db.tablename."""
        return self.__getattr__(str(key))
    def __getattr__(self, key):
        # Attribute access hook that materializes lazily-defined tables on
        # first touch.  `ogetattr` is presumably object.__getattribute__
        # bound at module level (not visible here -- TODO confirm); it is
        # used to read internals without re-entering this hook.
        if ogetattr(self,'_lazy_tables') and \
                key in ogetattr(self,'_LAZY_TABLES'):
            # first access: pop the stored definition and build the Table now
            tablename, fields, args = self._LAZY_TABLES.pop(key)
            return self.lazy_define_table(tablename,*fields,**args)
        return ogetattr(self, key)
    def __setitem__(self, key, value):
        # db['name'] = table; `osetattr` bypasses the redefinition guard
        # in __setattr__ below (module-level alias -- TODO confirm).
        osetattr(self, str(key), value)
    def __setattr__(self, key, value):
        # Non-underscore attributes are table names: refuse to clobber an
        # existing one; internal (underscored) attributes rebind freely.
        if key[:1]!='_' and key in self:
            raise SyntaxError(
                'Object %s exists and cannot be redefined' % key)
        osetattr(self,key,value)

    # deleting db['name'] simply removes the attribute
    __delitem__ = object.__delattr__
7931 - def __repr__(self):
7932 if hasattr(self,'_uri'): 7933 return '<DAL uri="%s">' % hide_password(str(self._uri)) 7934 else: 7935 return '<DAL db_uid="%s">' % self._db_uid
7936
    def smart_query(self,fields,text):
        # Parse a free-text search expression via the module-level
        # smart_query() helper and wrap the result in a Set on this db.
        return Set(self, smart_query(fields,text))
    def __call__(self, query=None, ignore_common_filters=None):
        # Build a Set: a Table means "all records of that table", a Field
        # means "field is not NULL", and a dict may carry an explicit
        # ignore_common_filters flag.
        if isinstance(query,Table):
            query = self._adapter.id_query(query)
        elif isinstance(query,Field):
            query = query!=None
        elif isinstance(query, dict):
            icf = query.get("ignore_common_filters")
            if icf: ignore_common_filters = icf
        return Set(self, query, ignore_common_filters=ignore_common_filters)
    def commit(self):
        """Commit the current transaction on the underlying adapter."""
        self._adapter.commit()
    def rollback(self):
        """Roll back the current transaction on the underlying adapter."""
        self._adapter.rollback()
    def close(self):
        # Close the adapter connection and deregister this instance from the
        # per-thread instance group keyed by db_uid; the group itself is
        # dropped once empty.
        self._adapter.close()
        if self._db_uid in THREAD_LOCAL.db_instances:
            db_group = THREAD_LOCAL.db_instances[self._db_uid]
            db_group.remove(self)
            if not db_group:
                del THREAD_LOCAL.db_instances[self._db_uid]
7964 - def executesql(self, query, placeholders=None, as_dict=False, 7965 fields=None, colnames=None):
7966 """ 7967 placeholders is optional and will always be None. 7968 If using raw SQL with placeholders, placeholders may be 7969 a sequence of values to be substituted in 7970 or, (if supported by the DB driver), a dictionary with keys 7971 matching named placeholders in your SQL. 7972 7973 Added 2009-12-05 "as_dict" optional argument. Will always be 7974 None when using DAL. If using raw SQL can be set to True 7975 and the results cursor returned by the DB driver will be 7976 converted to a sequence of dictionaries keyed with the db 7977 field names. Tested with SQLite but should work with any database 7978 since the cursor.description used to get field names is part of the 7979 Python dbi 2.0 specs. Results returned with as_dict=True are 7980 the same as those returned when applying .to_list() to a DAL query. 7981 7982 [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] 7983 7984 Added 2012-08-24 "fields" and "colnames" optional arguments. If either 7985 is provided, the results cursor returned by the DB driver will be 7986 converted to a DAL Rows object using the db._adapter.parse() method. 7987 7988 The "fields" argument is a list of DAL Field objects that match the 7989 fields returned from the DB. The Field objects should be part of one or 7990 more Table objects defined on the DAL object. The "fields" list can 7991 include one or more DAL Table objects in addition to or instead of 7992 including Field objects, or it can be just a single table (not in a 7993 list). In that case, the Field objects will be extracted from the 7994 table(s). 7995 7996 Instead of specifying the "fields" argument, the "colnames" argument 7997 can be specified as a list of field names in tablename.fieldname format. 7998 Again, these should represent tables and fields defined on the DAL 7999 object. 8000 8001 It is also possible to specify both "fields" and the associated 8002 "colnames". 
In that case, "fields" can also include DAL Expression 8003 objects in addition to Field objects. For Field objects in "fields", 8004 the associated "colnames" must still be in tablename.fieldname format. 8005 For Expression objects in "fields", the associated "colnames" can 8006 be any arbitrary labels. 8007 8008 Note, the DAL Table objects referred to by "fields" or "colnames" can 8009 be dummy tables and do not have to represent any real tables in the 8010 database. Also, note that the "fields" and "colnames" must be in the 8011 same order as the fields in the results cursor returned from the DB. 8012 """ 8013 adapter = self._adapter 8014 if placeholders: 8015 adapter.execute(query, placeholders) 8016 else: 8017 adapter.execute(query) 8018 if as_dict: 8019 if not hasattr(adapter.cursor,'description'): 8020 raise RuntimeError("database does not support executesql(...,as_dict=True)") 8021 # Non-DAL legacy db query, converts cursor results to dict. 8022 # sequence of 7-item sequences. each sequence tells about a column. 8023 # first item is always the field name according to Python Database API specs 8024 columns = adapter.cursor.description 8025 # reduce the column info down to just the field names 8026 fields = [f[0] for f in columns] 8027 # will hold our finished resultset in a list 8028 data = adapter._fetchall() 8029 # convert the list for each row into a dictionary so it's 8030 # easier to work with. 
row['field_name'] rather than row[0] 8031 return [dict(zip(fields,row)) for row in data] 8032 try: 8033 data = adapter._fetchall() 8034 except: 8035 return None 8036 if fields or colnames: 8037 fields = [] if fields is None else fields 8038 if not isinstance(fields, list): 8039 fields = [fields] 8040 extracted_fields = [] 8041 for field in fields: 8042 if isinstance(field, Table): 8043 extracted_fields.extend([f for f in field]) 8044 else: 8045 extracted_fields.append(field) 8046 if not colnames: 8047 colnames = ['%s.%s' % (f.tablename, f.name) 8048 for f in extracted_fields] 8049 data = adapter.parse( 8050 data, fields=extracted_fields, colnames=colnames) 8051 return data
8052
8053 - def _remove_references_to(self, thistable):
8054 for table in self: 8055 table._referenced_by = [field for field in table._referenced_by 8056 if not field.table==thistable]
8057
8058 - def export_to_csv_file(self, ofile, *args, **kwargs):
8059 step = long(kwargs.get('max_fetch_rows,',500)) 8060 write_colnames = kwargs['write_colnames'] = \ 8061 kwargs.get("write_colnames", True) 8062 for table in self.tables: 8063 ofile.write('TABLE %s\r\n' % table) 8064 query = self._adapter.id_query(self[table]) 8065 nrows = self(query).count() 8066 kwargs['write_colnames'] = write_colnames 8067 for k in range(0,nrows,step): 8068 self(query).select(limitby=(k,k+step)).export_to_csv_file( 8069 ofile, *args, **kwargs) 8070 kwargs['write_colnames'] = False 8071 ofile.write('\r\n\r\n') 8072 ofile.write('END')
8073
    def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
                             unique='uuid', *args, **kwargs):
        # Restore a multi-table dump produced by export_to_csv_file():
        # a sequence of "TABLE <name>" headers each followed by that
        # table's CSV rows, terminated by a line reading "END".
        #if id_map is None: id_map={}
        id_offset = {} # only used if id_map is None
        for line in ifile:
            line = line.strip()
            if not line:
                continue
            elif line == 'END':
                return
            elif not line.startswith('TABLE ') or not line[6:] in self.tables:
                raise SyntaxError('invalid file format')
            else:
                # delegate the rows that follow to the table's own importer
                tablename = line[6:]
                self[tablename].import_from_csv_file(
                    ifile, id_map, null, unique, id_offset, *args, **kwargs)
def DAL_unpickler(db_uid):
    # Rebuild a pickled DAL as a "<zombie>": no connection is opened,
    # the live instance is recovered from thread-local state by db_uid.
    return DAL('<zombie>',db_uid=db_uid)
def DAL_pickler(db):
    # Pickle a DAL by its uid only; DAL_unpickler restores it.
    return DAL_unpickler, (db._db_uid,)

# register the reduction with the pickle machinery
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
class SQLALL(object):
    """
    Renders as the comma-separated list of all field names of a table,
    each prefixed by the table name and '.' (i.e. the expansion of
    "tablename.*" in a SELECT).

    normally only called from within gluon.sql
    """

    def __init__(self, table):
        # keep a reference to the owning table; fields are read lazily
        self._table = table

    def __str__(self):
        # each Field's str() is already "tablename.fieldname"
        return ', '.join(str(field) for field in self._table)
# class Reference(int): (an int subclass predates 64-bit/bigint ids)
class Reference(long):
    """
    Lazy reference to a record: behaves as the integer id of the
    referenced row; on first attribute/item access the full record is
    fetched from self._table and cached in self._record.
    """

    def __allocate(self):
        # fetch and cache the referenced record on first access
        if not self._record:
            self._record = self._table[long(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self)))

    def __getattr__(self, key, default=None):
        # 'id' is the integer value itself; anything else is read from the
        # lazily loaded record.  BUGFIX: accepts an optional default so
        # get() below no longer raises TypeError when called with two
        # arguments (Python's own attribute lookup still passes only key,
        # so attribute access behaves exactly as before).
        if key == 'id':
            return long(self)
        self.__allocate()
        return self._record.get(key, default)

    def get(self, key, default=None):
        """Dict-style access to the referenced record with a default."""
        return self.__getattr__(key, default)

    def __setattr__(self, key, value):
        # underscore names are internal bookkeeping on the Reference
        # itself; everything else writes through to the loaded record
        if key.startswith('_'):
            long.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        # item access mirrors attribute access
        if key == 'id':
            return long(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self,key,value):
        self.__allocate()
        self._record[key] = value
def Reference_unpickler(data):
    """Reconstruct a pickled Reference's integer value from its
    marshal-serialized form (see Reference_pickler)."""
    value = marshal.loads(data)
    return value
def Reference_pickler(data):
    # Serialize a Reference as the marshal form of its integer id only
    # (the cached record is intentionally dropped).
    try:
        marshal_dump = marshal.dumps(long(data))
    except AttributeError:
        # fall back to hand-building the 'i<int32>' marshal wire format
        # -- presumably for platforms where dumps fails here; TODO confirm
        marshal_dump = 'i%s' % struct.pack('<i', long(data))
    return (Reference_unpickler, (marshal_dump,))

# register the reduction with the pickle machinery
copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class MethodAdder(object):
    """
    Decorator factory that attaches new instance methods to a Table:

        @db.mytable.add_method.my_method
        def my_method(...): ...

    Calling it bare (@db.mytable.add_method()) registers the function
    under its own name.
    """
    def __init__(self,table):
        self.table = table
    def __call__(self):
        return self.register()
    def __getattr__(self,method_name):
        # attribute access picks the name the method is registered under
        return self.register(method_name)
    def register(self,method_name=None):
        def bind(func):
            target = self.table
            import types
            bound = types.MethodType(func, target, target.__class__)
            # default to the function's own name when none was given
            setattr(target, method_name or func.func_name, bound)
            return func
        return bind
8179 -class Table(object):
8180 8181 """ 8182 an instance of this class represents a database table 8183 8184 Example:: 8185 8186 db = DAL(...) 8187 db.define_table('users', Field('name')) 8188 db.users.insert(name='me') # print db.users._insert(...) to see SQL 8189 db.users.drop() 8190 """ 8191
    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the table and performs checking on the provided fields.

        Each table will have automatically an 'id'.

        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.

        :raises SyntaxError: when a supplied field is of incorrect type.
        """
        self._actual = False # set to True by define_table()
        self._tablename = tablename
        self._ot = args.get('actual_name')
        self._sequence_name = args.get('sequence_name') or \
            db and db._adapter.sequence_name(tablename)
        self._trigger_name = args.get('trigger_name') or \
            db and db._adapter.trigger_name(tablename)
        self._common_filter = args.get('common_filter')
        self._format = args.get('format')
        self._singular = args.get(
            'singular',tablename.replace('_',' ').capitalize())
        self._plural = args.get(
            'plural',pluralize(self._singular.lower()).capitalize())
        # horrible, but kept for backward compatibility with appadmin:
        if 'primarykey' in args and args['primarykey'] is not None:
            self._primarykey = args.get('primarykey')

        # insert/update/delete callback chains (delete_uploaded_files keeps
        # upload folders consistent with the records)
        self._before_insert = []
        self._before_update = [Set.delete_uploaded_files]
        self._before_delete = [Set.delete_uploaded_files]
        self._after_insert = []
        self._after_update = []
        self._after_delete = []

        self.add_method = MethodAdder(self)

        fieldnames,newfields=set(),[]
        if hasattr(self,'_primarykey'):
            # keyed table: no automatic 'id'; the caller supplies the keys
            if not isinstance(self._primarykey,list):
                raise SyntaxError(
                    "primarykey must be a list of fields from table '%s'" \
                    % tablename)
            if len(self._primarykey)==1:
                self._id = [f for f in fields if isinstance(f,Field) \
                                and f.name==self._primarykey[0]][0]
        elif not [f for f in fields if isinstance(f,Field) and f.type=='id']:
            # conventional table without an explicit id: synthesize one
            field = Field('id', 'id')
            newfields.append(field)
            fieldnames.add('id')
            self._id = field
        virtual_fields = []
        # normalize the field list: collect virtual fields separately,
        # copy Fields already bound to another db, expand nested Tables
        for field in fields:
            if isinstance(field, (FieldMethod, FieldVirtual)):
                virtual_fields.append(field)
            elif isinstance(field, Field) and not field.name in fieldnames:
                if field.db is not None:
                    field = copy.copy(field)
                newfields.append(field)
                fieldnames.add(field.name)
                if field.type=='id':
                    self._id = field
            elif isinstance(field, Table):
                # borrow fields (except 'id') from another table definition
                table = field
                for field in table:
                    if not field.name in fieldnames and not field.type=='id':
                        t2 = not table._actual and self._tablename
                        field = field.clone(point_self_references_to=t2)
                        newfields.append(field)
                        fieldnames.add(field.name)
            elif not isinstance(field, (Field, Table)):
                raise SyntaxError(
                    'define_table argument is not a Field or Table: %s' % field)
        fields = newfields
        self._db = db
        tablename = tablename
        self._fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)

        if db and db._adapter.uploads_in_blob==True:
            # adapters that store uploads in-db get a companion blob
            # column ('<name>_blob') for each upload field
            uploadfields = [f.name for f in fields if f.type=='blob']
            for field in fields:
                fn = field.uploadfield
                if isinstance(field, Field) and field.type == 'upload'\
                        and fn is True:
                    fn = field.uploadfield = '%s_blob' % field.name
                if isinstance(fn,str) and not fn in uploadfields:
                    fields.append(Field(fn,'blob',default='',
                                        writable=False,readable=False))

        lower_fieldnames = set()
        reserved = dir(Table) + ['fields']
        for field in fields:
            field_name = field.name
            if db and db.check_reserved:
                db.check_reserved_keyword(field_name)
            elif field_name in reserved:
                raise SyntaxError("field name %s not allowed" % field_name)

            # field names must be unique case-insensitively (many backends
            # treat column names as case-insensitive)
            if field_name.lower() in lower_fieldnames:
                raise SyntaxError("duplicate field %s in table %s" \
                    % (field_name, tablename))
            else:
                lower_fieldnames.add(field_name.lower())

            self.fields.append(field_name)
            self[field_name] = field
            if field.type == 'id':
                self['id'] = field
            # bind the field to this table/db
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = db
            # clamp declared length to what the adapter can store
            if db and not field.type in ('text', 'blob', 'json') and \
                    db._adapter.maxcharlength < field.length:
                field.length = db._adapter.maxcharlength
        self.ALL = SQLALL(self)

        if hasattr(self,'_primarykey'):
            for k in self._primarykey:
                if k not in self.fields:
                    # NOTE(review): message has a stray "'%s " (missing
                    # closing quote) -- runtime string left untouched here
                    raise SyntaxError(
                        "primarykey must be a list of fields from table '%s " % tablename)
                else:
                    self[k].notnull = True
        for field in virtual_fields:
            self[field.name] = field
    @property
    def fields(self):
        """Read-only list of this table's field names.

        Returns the internal SQLCallableList, so it supports both
        iteration/membership tests and being called (``table.fields()``).
        """
        return self._fields
8329
    def update(self,*args,**kwargs):
        """Dict-style ``update`` is deliberately disabled on tables.

        Use ``db(query).update(...)`` or ``record.update_record(...)``
        instead; always raises RuntimeError.
        """
        raise RuntimeError("Syntax Not Supported")
8332
    def _enable_record_versioning(self,
                                  archive_db=None,
                                  archive_name = '%(tablename)s_archive',
                                  current_record = 'current_record',
                                  is_active = 'is_active'):
        """Turn on record versioning for this table.

        Defines a companion archive table (named via *archive_name*) that
        receives a copy of each record on every update, linked back through
        the *current_record* field.  If the table has an *is_active* field,
        deletes are turned into soft-deletes (``is_active=False``) and the
        table's common filter is extended to hide inactive rows.
        """
        archive_db = archive_db or self._db
        archive_name = archive_name % dict(tablename=self._tablename)
        if archive_name in archive_db.tables():
            return # do not try define the archive if already exists
        fieldnames = self.fields()
        # when archiving into a different db, a plain bigint stands in for
        # the cross-database reference
        field_type = self if archive_db is self._db else 'bigint'
        archive_db.define_table(
            archive_name,
            Field(current_record,field_type),
            *[field.clone(unique=False) for field in self])
        # default args bind the archive table/column at definition time
        self._before_update.append(
            lambda qset,fs,db=archive_db,an=archive_name,cn=current_record:
                archive_record(qset,fs,db[an],cn))
        if is_active and is_active in fieldnames:
            self._before_delete.append(
                lambda qset: qset.update(is_active=False))
            newquery = lambda query, t=self: t.is_active == True
            query = self._common_filter
            if query:
                # NOTE(review): _common_filter is normally a callable taking
                # a query; combining it with `&` against a lambda looks
                # suspect - confirm this branch is ever exercised.
                newquery = query & newquery
            self._common_filter = newquery
8359
    def _validate(self,**vars):
        """Run each named field's validators over the supplied values.

        Returns a Row mapping field name -> error message for every field
        whose value failed validation; empty when all values validate.
        """
        errors = Row()
        for key,value in vars.iteritems():
            value,error = self[key].validate(value)
            if error:
                errors[key] = error
        return errors
8367
8368 - def _create_references(self):
8369 db = self._db 8370 pr = db._pending_references 8371 self._referenced_by = [] 8372 for field in self: 8373 fieldname = field.name 8374 field_type = field.type 8375 if isinstance(field_type,str) and field_type[:10] == 'reference ': 8376 ref = field_type[10:].strip() 8377 if not ref.split(): 8378 raise SyntaxError('Table: reference to nothing: %s' %ref) 8379 refs = ref.split('.') 8380 rtablename = refs[0] 8381 if not rtablename in db: 8382 pr[rtablename] = pr.get(rtablename,[]) + [field] 8383 continue 8384 rtable = db[rtablename] 8385 if len(refs)==2: 8386 rfieldname = refs[1] 8387 if not hasattr(rtable,'_primarykey'): 8388 raise SyntaxError( 8389 'keyed tables can only reference other keyed tables (for now)') 8390 if rfieldname not in rtable.fields: 8391 raise SyntaxError( 8392 "invalid field '%s' for referenced table '%s' in table '%s'" \ 8393 % (rfieldname, rtablename, self._tablename)) 8394 rtable._referenced_by.append(field) 8395 for referee in pr.get(self._tablename,[]): 8396 self._referenced_by.append(referee)
8397
8398 - def _filter_fields(self, record, id=False):
8399 return dict([(k, v) for (k, v) in record.iteritems() if k 8400 in self.fields and (self[k].type!='id' or id)])
8401
8402 - def _build_query(self,key):
8403 """ for keyed table only """ 8404 query = None 8405 for k,v in key.iteritems(): 8406 if k in self._primarykey: 8407 if query: 8408 query = query & (self[k] == v) 8409 else: 8410 query = (self[k] == v) 8411 else: 8412 raise SyntaxError( 8413 'Field %s is not part of the primary key of %s' % \ 8414 (k,self._tablename)) 8415 return query
8416
    def __getitem__(self, key):
        """Look up a record or attribute on this table.

        - falsy key: returns None
        - dict key: keyed-table lookup via the primary key
        - numeric key (or a GAE Key): fetch the record with that id
        - anything else: attribute access (fields, etc.)
        """
        if not key:
            return None
        elif isinstance(key, dict):
            """ for keyed table """
            query = self._build_query(key)
            rows = self._db(query).select()
            if rows:
                return rows[0]
            return None
        # precedence: isdigit() or ('google' in DRIVERS and isinstance(...))
        elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key):
            return self._db(self._id == key).select(limitby=(0,1)).first()
        elif key:
            return ogetattr(self, str(key))
8431
    def __call__(self, key=DEFAULT, **kwargs):
        """Shortcut record lookup: ``table(id)``, ``table(query)`` or
        ``table(field=value, ...)``.

        Special keyword args ``_for_update`` and ``_orderby`` are popped
        and forwarded to the select.  When both a key and keyword filters
        are given, the fetched record must also match every keyword
        value, otherwise None is returned.
        """
        for_update = kwargs.get('_for_update',False)
        if '_for_update' in kwargs: del kwargs['_for_update']

        orderby = kwargs.get('_orderby',None)
        if '_orderby' in kwargs: del kwargs['_orderby']

        if not key is DEFAULT:
            if isinstance(key, Query):
                record = self._db(key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby).first()
            elif not str(key).isdigit():
                # non-numeric keys cannot be record ids
                record = None
            else:
                record = self._db(self._id == key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby).first()
            if record:
                # keyword args act as extra equality constraints
                for k,v in kwargs.iteritems():
                    if record[k]!=v: return None
            return record
        elif kwargs:
            # AND together one equality query per keyword argument
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()])
            return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby).first()
        else:
            return None
8457
    def __setitem__(self, key, value):
        """Insert/update a record (numeric or dict key) or set an attribute.

        - dict key + dict value: keyed-table upsert; the key must supply
          every primary-key field
        - numeric key: 0 inserts a new record, any other id updates it
          (SyntaxError if no such record)
        - anything else: plain attribute assignment
        """
        if isinstance(key, dict) and isinstance(value, dict):
            """ option for keyed table """
            if set(key.keys()) == set(self._primarykey):
                value = self._filter_fields(value)
                kv = {}
                kv.update(value)
                kv.update(key)
                # try insert first; fall back to update on conflict
                if not self.insert(**kv):
                    query = self._build_query(key)
                    self._db(query).update(**self._filter_fields(value))
            else:
                raise SyntaxError(
                    'key must have all fields from primary key: %s'%\
                    (self._primarykey))
        elif str(key).isdigit():
            if key == 0:
                self.insert(**self._filter_fields(value))
            elif self._db(self._id == key)\
                    .update(**self._filter_fields(value)) is None:
                raise SyntaxError('No such record: %s' % key)
        else:
            if isinstance(key, dict):
                raise SyntaxError(
                    'value must be a dictionary: %s' % value)
            osetattr(self, str(key), value)

    # attribute access shares the record/field lookup logic
    __getattr__ = __getitem__
    def __setattr__(self, key, value):
        """Forbid rebinding of already-defined public names (fields etc.);
        names starting with '_' may always be (re)assigned."""
        if key[:1]!='_' and key in self:
            raise SyntaxError('Object exists and cannot be redefined: %s' % key)
        osetattr(self,key,value)
8491
    def __delitem__(self, key):
        """Delete the record addressed by *key* (a primary-key dict for
        keyed tables, or a numeric id); SyntaxError if nothing matched."""
        if isinstance(key, dict):
            query = self._build_query(key)
            if not self._db(query).delete():
                raise SyntaxError('No such record: %s' % key)
        elif not str(key).isdigit() or \
                not self._db(self._id == key).delete():
            raise SyntaxError('No such record: %s' % key)
8500
    def __contains__(self,key):
        """True when *key* names an attribute (typically a field) of this
        table."""
        return hasattr(self,key)

    # dict-style alias (Python 2 convention)
    has_key = __contains__
    def items(self):
        """Return the (attribute, value) pairs of this table object."""
        return self.__dict__.items()
8508
    def __iter__(self):
        """Iterate over this table's Field objects, in definition order."""
        for fieldname in self.fields:
            yield self[fieldname]
8512
    def iteritems(self):
        """Iterator over (attribute, value) pairs (Python 2 dict protocol)."""
        return self.__dict__.iteritems()
8515 8516
    def __repr__(self):
        """Debug representation: table name plus its field names."""
        return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
8519
    def __str__(self):
        """SQL name of the table; when aliased (``_ot`` set), emit the
        'original AS alias' form (Oracle has no AS keyword for tables)."""
        if self._ot is not None:
            if 'Oracle' in str(type(self._db._adapter)): # <<< patch
                return '%s %s' % (self._ot, self._tablename) # <<< patch
            return '%s AS %s' % (self._ot, self._tablename)
        return self._tablename
8526
    def _drop(self, mode = ''):
        """Return the SQL that would drop this table (does not execute)."""
        return self._db._adapter._drop(self, mode)
8529
8530 - def drop(self, mode = ''):
8531 return self._db._adapter.drop(self,mode)
8532
8533 - def _listify(self,fields,update=False):
8534 new_fields = {} # format: new_fields[name] = (field,value) 8535 8536 # store all fields passed as input in new_fields 8537 for name in fields: 8538 if not name in self.fields: 8539 if name != 'id': 8540 raise SyntaxError( 8541 'Field %s does not belong to the table' % name) 8542 else: 8543 field = self[name] 8544 value = fields[name] 8545 if field.filter_in: 8546 value = field.filter_in(value) 8547 new_fields[name] = (field,value) 8548 8549 # check all fields that should be in the table but are not passed 8550 to_compute = [] 8551 for ofield in self: 8552 name = ofield.name 8553 if not name in new_fields: 8554 # if field is supposed to be computed, compute it! 8555 if ofield.compute: # save those to compute for later 8556 to_compute.append((name,ofield)) 8557 # if field is required, check its default value 8558 elif not update and not ofield.default is None: 8559 value = ofield.default 8560 fields[name] = value 8561 new_fields[name] = (ofield,value) 8562 # if this is an update, user the update field instead 8563 elif update and not ofield.update is None: 8564 value = ofield.update 8565 fields[name] = value 8566 new_fields[name] = (ofield,value) 8567 # if the field is still not there but it should, error 8568 elif not update and ofield.required: 8569 raise RuntimeError( 8570 'Table: missing required field: %s' % name) 8571 # now deal with fields that are supposed to be computed 8572 if to_compute: 8573 row = Row(fields) 8574 for name,ofield in to_compute: 8575 # try compute it 8576 try: 8577 new_fields[name] = (ofield,ofield.compute(row)) 8578 except (KeyError, AttributeError): 8579 # error sinlently unless field is required! 8580 if ofield.required: 8581 raise SyntaxError('unable to comput field: %s' % name) 8582 return new_fields.values()
8583
    def _attempt_upload(self, fields):
        """Replace file-like values of 'upload' fields with stored names.

        Accepts cgi.FieldStorage-style objects (``.file``/``.filename``)
        or open files (``.read``/``.name``); anything else that is
        non-string raises RuntimeError.  *fields* is mutated in place.
        """
        for field in self:
            if field.type=='upload' and field.name in fields:
                value = fields[field.name]
                # strings are assumed to already be stored filenames
                if value and not isinstance(value,str):
                    if hasattr(value,'file') and hasattr(value,'filename'):
                        new_name = field.store(value.file,filename=value.filename)
                    elif hasattr(value,'read') and hasattr(value,'name'):
                        new_name = field.store(value,filename=value.name)
                    else:
                        raise RuntimeError("Unable to handle upload")
                    fields[field.name] = new_name
8596
    def _defaults(self, fields):
        "If there are no fields/values specified, return table defaults"
        if not fields:
            fields = {}
            for field in self:
                # the id field is assigned by the backend, never defaulted
                if field.type != "id":
                    fields[field.name] = field.default
        return fields
8605
    def _insert(self, **fields):
        """Return the INSERT SQL for *fields* without executing it."""
        fields = self._defaults(fields)
        return self._db._adapter._insert(self, self._listify(fields))
8609
    def insert(self, **fields):
        """Insert a record; returns the new id (0 when vetoed).

        Applies defaults, stores uploads, then runs _before_insert
        callbacks (any truthy return aborts the insert) and, on success,
        _after_insert callbacks.
        """
        fields = self._defaults(fields)
        self._attempt_upload(fields)
        if any(f(fields) for f in self._before_insert): return 0
        ret =  self._db._adapter.insert(self, self._listify(fields))
        if ret and self._after_insert:
            fields = Row(fields)
            [f(fields,ret) for f in self._after_insert]
        return ret
8619
    def validate_and_insert(self,**fields):
        """Validate *fields* and insert only when everything passes.

        Returns a Row with ``errors`` (field name -> message, empty on
        success) and ``id`` (the new record id, or None if any error).
        """
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(fields)
        for key,value in fields.iteritems():
            value,error = self[key].validate(value)
            if error:
                response.errors[key] = "%s" % error
            else:
                # keep the validator-transformed value
                new_fields[key] = value
        if not response.errors:
            response.id = self.insert(**new_fields)
        else:
            response.id = None
        return response
8635
    def update_or_insert(self, _key=DEFAULT, **values):
        """Update the record matching *_key* (or *values* when no key is
        given), inserting a new one when no match exists.

        Returns the new record's id on insert, None on update.
        """
        if _key is DEFAULT:
            record = self(**values)
        elif isinstance(_key,dict):
            record = self(**_key)
        else:
            record = self(_key)
        if record:
            record.update_record(**values)
            newid = None
        else:
            newid = self.insert(**values)
        return newid
8649
    def bulk_insert(self, items):
        """
        Insert many records at once; *items* is a list of field dicts.

        Every item is listified first; any truthy _before_insert callback
        on any item aborts the whole operation (returns 0).  On success
        _after_insert callbacks run per item and the adapter's result
        (typically the list of new ids) is returned.
        """
        items = [self._listify(item) for item in items]
        if any(f(item) for item in items for f in self._before_insert):return 0
        ret = self._db._adapter.bulk_insert(self,items)
        ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert]
        return ret
8659
    def _truncate(self, mode = None):
        """Return the TRUNCATE SQL for this table without executing it."""
        return self._db._adapter._truncate(self, mode)
8662
8663 - def truncate(self, mode = None):
8664 return self._db._adapter.truncate(self, mode)
8665
    def import_from_csv_file(
        self,
        csvfile,
        id_map=None,
        null='<NULL>',
        unique='uuid',
        id_offset=None, # id_offset used only when id_map is None
        *args, **kwargs
        ):
        """
        Import records from csv file.
        Column headers must have same names as table fields.
        Field 'id' is ignored.
        If column names read 'table.file' the 'table.' prefix is ignored.
        'unique' argument is a field which must be unique
        (typically a uuid field)
        'restore' argument is default False;
        if set True will remove old values in table first.
        'id_map' if set to None will not map ids.
        The import will keep the id numbers in the restored table.
        This assumes that there is a field of type id that
        is integer and in incrementing order.
        Will keep the id numbers in restored table.
        """

        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        restore = kwargs.get('restore', False)
        if restore:
            self._db[self].truncate()

        reader = csv.reader(csvfile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = None
        if isinstance(id_map, dict):
            if not self._tablename in id_map:
                id_map[self._tablename] = {}
            id_map_self = id_map[self._tablename]

        def fix(field, value, id_map, id_offset):
            # convert one csv cell to the python value for `field`
            list_reference_s='list:reference'
            if value == null:
                value = None
            elif field.type=='blob':
                value = base64.b64decode(value)
            elif field.type=='double' or field.type=='float':
                if not value.strip():
                    value = None
                else:
                    value = float(value)
            elif field.type in ('integer','bigint'):
                if not value.strip():
                    value = None
                else:
                    value = long(value)
            elif field.type.startswith('list:string'):
                value = bar_decode_string(value)
            elif field.type.startswith(list_reference_s):
                ref_table = field.type[len(list_reference_s):].strip()
                if id_map is not None:
                    # remap each referenced id through the id_map
                    value = [id_map[ref_table][long(v)] \
                             for v in bar_decode_string(value)]
                else:
                    value = [v for v in bar_decode_string(value)]
            elif field.type.startswith('list:'):
                value = bar_decode_integer(value)
            elif id_map and field.type.startswith('reference'):
                try:
                    value = id_map[field.type[9:].strip()][long(value)]
                except KeyError:
                    pass
            elif id_offset and field.type.startswith('reference'):
                try:
                    value = id_offset[field.type[9:].strip()]+long(value)
                except KeyError:
                    pass
            return (field.name, value)

        def is_id(colname):
            # True when `colname` is this table's id-type field
            if colname in self:
                return self[colname].type == 'id'
            else:
                return False

        first = True
        unique_idx = None
        for line in reader:
            if not line:
                break
            if not colnames:
                # first row is the header: strip any 'table.' prefixes and
                # record which column (if any) carries the id
                colnames = [x.split('.',1)[-1] for x in line][:len(line)]
                cols, cid = [], None
                for i,colname in enumerate(colnames):
                    if is_id(colname):
                        cid = i
                    else:
                        cols.append(i)
                    if colname == unique:
                        unique_idx = i
            else:
                items = [fix(self[colnames[i]], line[i], id_map, id_offset) \
                         for i in cols if colnames[i] in self.fields]

                if not id_map and cid is not None and id_offset is not None and not unique_idx:
                    csv_id = long(line[cid])
                    curr_id = self.insert(**dict(items))
                    if first:
                        first = False
                        # First curr_id is bigger than csv_id,
                        # then we are not restoring but
                        # extending db table with csv db table
                        if curr_id>csv_id:
                            id_offset[self._tablename] = curr_id-csv_id
                        else:
                            id_offset[self._tablename] = 0
                    # create new id until we get the same as old_id+offset
                    while curr_id<csv_id+id_offset[self._tablename]:
                        self._db(self._db[self][colnames[cid]] == curr_id).delete()
                        curr_id = self.insert(**dict(items))
                # Validation. Check for duplicate of 'unique' &,
                # if present, update instead of insert.
                elif not unique_idx:
                    new_id = self.insert(**dict(items))
                else:
                    unique_value = line[unique_idx]
                    query = self._db[self][unique] == unique_value
                    record = self._db(query).select().first()
                    if record:
                        record.update_record(**dict(items))
                        new_id = record[self._id.name]
                    else:
                        new_id = self.insert(**dict(items))
                if id_map and cid is not None:
                    id_map_self[long(line[cid])] = new_id
    def as_dict(self, flat=False, sanitize=True, field_options=True):
        """Serialize this table definition to a plain dict.

        With ``sanitize=True`` only fields that are readable or writable
        are included.  Each field is serialized via Field.as_dict.
        """
        tablename = str(self)
        table_as_dict = dict(name=tablename, items={}, fields=[],
                             sequence_name=self._sequence_name,
                             trigger_name=self._trigger_name,
                             common_filter=self._common_filter, format=self._format,
                             singular=self._singular, plural=self._plural)

        for field in self:
            if (field.readable or field.writable) or (not sanitize):
                table_as_dict["fields"].append(field.name)
                table_as_dict["items"][field.name] = \
                    field.as_dict(flat=flat, sanitize=sanitize,
                                  options=field_options)
        return table_as_dict
8817
    def as_xml(self, sanitize=True, field_options=True):
        """Serialize this table definition to XML (requires serializers)."""
        if not have_serializers:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize,
                         field_options=field_options)
        return serializers.xml(d)
8824
    def as_json(self, sanitize=True, field_options=True):
        """Serialize this table definition to JSON (requires serializers)."""
        if not have_serializers:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize,
                         field_options=field_options)
        return serializers.json(d)
8831
    def as_yaml(self, sanitize=True, field_options=True):
        """Serialize this table definition to YAML (requires serializers)."""
        if not have_serializers:
            raise ImportError("No YAML serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize,
                         field_options=field_options)
        return serializers.yaml(d)
8838
    def with_alias(self, alias):
        """Return an aliased copy of this table for self-joins."""
        return self._db._adapter.alias(self,alias)
8841
    def on(self, query):
        """Build a JOIN ... ON expression: ``table.on(condition)``."""
        return Expression(self._db,self._db._adapter.ON,self,query)
8844
def archive_record(qset,fs,archive_table,current_record):
    """Copy every record about to be updated by *qset* into
    *archive_table*, linking each copy back via *current_record*.

    Registered as a _before_update callback by
    Table._enable_record_versioning; returns False so the update
    itself is never vetoed.  Refuses multi-table (join) updates.
    """
    tablenames = qset.db._adapter.tables(qset.query)
    if len(tablenames)!=1: raise RuntimeError("cannot update join")
    # NOTE(review): `table` is never used below; the db[...] access may
    # matter for lazy table definition - confirm before removing.
    table = qset.db[tablenames[0]]
    for row in qset.select():
        fields = archive_table._filter_fields(row)
        fields[current_record] = row.id
        archive_table.insert(**fields)
    return False
8854
class Expression(object):
    """
    Node of a SQL expression tree.

    Pairs an adapter operation (``op``) with its operands (``first`` and
    ``second``) and a web2py field type; the adapter's ``expand`` renders
    the tree as backend-specific SQL.  Field and Query build on this.
    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        type=None,
        **optional_args
        ):

        self.db = db
        self.op = op
        self.first = first
        self.second = second
        self._table = getattr(first,'_table',None)
        ### self._tablename = first._tablename ## CHECK
        if not type and first and hasattr(first,'type'):
            # inherit the SQL type from the first operand (usually a Field)
            self.type = first.type
        else:
            self.type = type
        self.optional_args = optional_args

    # --- aggregate and scalar SQL functions ---

    def sum(self):
        "SQL SUM() of this expression."
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)

    def max(self):
        "SQL MAX() of this expression."
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)

    def min(self):
        "SQL MIN() of this expression."
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)

    def len(self):
        "SQL LENGTH() of this expression (integer result)."
        db = self.db
        return Expression(db, db._adapter.LENGTH, self, None, 'integer')

    def avg(self):
        "SQL AVG() of this expression."
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)

    def abs(self):
        "SQL ABS() of this expression."
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)

    def lower(self):
        "SQL LOWER() of this expression."
        db = self.db
        return Expression(db, db._adapter.LOWER, self, None, self.type)

    def upper(self):
        "SQL UPPER() of this expression."
        db = self.db
        return Expression(db, db._adapter.UPPER, self, None, self.type)

    def replace(self,a,b):
        "SQL REPLACE(expr, a, b)."
        db = self.db
        return Expression(db, db._adapter.REPLACE, self, (a,b), self.type)

    # --- datetime component extraction ---

    def year(self):
        "EXTRACT(year) from a date/datetime expression."
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')

    def month(self):
        "EXTRACT(month) from a date/datetime expression."
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')

    def day(self):
        "EXTRACT(day) from a date/datetime expression."
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')

    def hour(self):
        "EXTRACT(hour) from a time/datetime expression."
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')

    def minutes(self):
        "EXTRACT(minute) from a time/datetime expression."
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')

    def coalesce(self,*others):
        "SQL COALESCE(expr, *others)."
        db = self.db
        return Expression(db, db._adapter.COALESCE, self, others, self.type)

    def coalesce_zero(self):
        "SQL COALESCE(expr, 0)."
        db = self.db
        return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)

    def seconds(self):
        "EXTRACT(second) from a time/datetime expression."
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')

    def epoch(self):
        "Seconds since the epoch for a datetime expression."
        db = self.db
        return Expression(db, db._adapter.EPOCH, self, None, 'integer')

    def __getslice__(self, start, stop):
        """SQL SUBSTRING via Python 2 slice syntax (0-based; negative
        indices count from the end; sys.maxint means 'to the end')."""
        db = self.db
        if start < 0:
            pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1

        if stop < 0:
            length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
        elif stop == sys.maxint:
            length = self.len()
        else:
            length = '(%s - %s)' % (stop + 1, pos0)
        return Expression(db,db._adapter.SUBSTRING,
                          self, (pos0, length), self.type)

    def __getitem__(self, i):
        "Single-character SQL substring: expr[i] == expr[i:i+1]."
        return self[i:i + 1]

    def __str__(self):
        "Expand to the adapter's SQL string."
        return self.db._adapter.expand(self,self.type)

    def __or__(self, other): # for use in sortby
        "Comma-join two expressions (multi-column orderby/groupby)."
        db = self.db
        return Expression(db,db._adapter.COMMA,self,other,self.type)

    def __invert__(self):
        "Descending sort order (SQL DESC) for use in orderby."
        db = self.db
        # NOTE(review): instances set `op`, never `_op`, so this hasattr
        # test looks like it can never be true and the double-invert
        # shortcut never fires - confirm before relying on it.
        if hasattr(self,'_op') and self.op == db._adapter.INVERT:
            return self.first
        return Expression(db,db._adapter.INVERT,self,type=self.type)

    def __add__(self, other):
        "SQL addition (or concatenation, per adapter)."
        db = self.db
        return Expression(db,db._adapter.ADD,self,other,self.type)

    def __sub__(self, other):
        "SQL subtraction; result type depends on the operand type."
        db = self.db
        if self.type in ('integer','bigint'):
            result_type = 'integer'
        elif self.type in ['date','time','datetime','double','float']:
            result_type = 'double'
        elif self.type.startswith('decimal('):
            result_type = self.type
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(db,db._adapter.SUB,self,other,result_type)

    def __mul__(self, other):
        "SQL multiplication."
        db = self.db
        return Expression(db,db._adapter.MUL,self,other,self.type)

    def __div__(self, other):
        "SQL division (Python 2 `/` operator)."
        db = self.db
        return Expression(db,db._adapter.DIV,self,other,self.type)

    def __mod__(self, other):
        "SQL modulo."
        db = self.db
        return Expression(db,db._adapter.MOD,self,other,self.type)

    # --- comparison operators: all build Query objects ---

    def __eq__(self, value):
        db = self.db
        return Query(db, db._adapter.EQ, self, value)

    def __ne__(self, value):
        db = self.db
        return Query(db, db._adapter.NE, self, value)

    def __lt__(self, value):
        db = self.db
        return Query(db, db._adapter.LT, self, value)

    def __le__(self, value):
        db = self.db
        return Query(db, db._adapter.LE, self, value)

    def __gt__(self, value):
        db = self.db
        return Query(db, db._adapter.GT, self, value)

    def __ge__(self, value):
        db = self.db
        return Query(db, db._adapter.GE, self, value)

    def like(self, value, case_sensitive=False):
        "SQL LIKE (or case-insensitive ILIKE, the default)."
        db = self.db
        op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
        return Query(db, op, self, value)

    def regexp(self, value):
        "SQL regular-expression match (adapter-specific operator)."
        db = self.db
        return Query(db, db._adapter.REGEXP, self, value)

    def belongs(self, *value):
        """
        SQL IN. Accepts the following inputs:
           field.belongs(1,2)
           field.belongs((1,2))
           field.belongs(query)

        Does NOT accept:
           field.belongs(1)
        """
        db = self.db
        if len(value) == 1:
            value = value[0]
        if isinstance(value,Query):
            # nested select of the referenced table's id
            value = db(value)._select(value.first._table._id)
        return Query(db, db._adapter.BELONGS, self, value)

    def startswith(self, value):
        "SQL prefix match; only valid for text-like field types."
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("startswith used with incompatible field type")
        return Query(db, db._adapter.STARTSWITH, self, value)

    def endswith(self, value):
        "SQL suffix match; only valid for text-like field types."
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("endswith used with incompatible field type")
        return Query(db, db._adapter.ENDSWITH, self, value)

    def contains(self, value, all=False, case_sensitive=False):
        """
        SQL substring / list-membership match.

        The case_sensitive parameter is only useful for PostgreSQL;
        for other RDBMSs it is ignored and contains is always
        case-insensitive.  For MongoDB and GAE contains is always
        case-sensitive.  A list/tuple value produces a combined query
        (AND when ``all=True``, otherwise OR).
        """
        db = self.db
        if isinstance(value,(list, tuple)):
            subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive)
                          for v in value if str(v).strip()]
            if not subqueries:
                return self.contains('')
            else:
                return reduce(all and AND or OR,subqueries)
        if not self.type in ('string', 'text', 'json') and not self.type.startswith('list:'):
            raise SyntaxError("contains used with incompatible field type")
        return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive)

    def with_alias(self, alias):
        "SQL 'expr AS alias' for select column lists."
        db = self.db
        return Expression(db, db._adapter.AS, self, alias, self.type)

    # GIS expressions

    def st_asgeojson(self, precision=15, options=0, version=1):
        "Render a geometry as GeoJSON."
        return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
                          dict(precision=precision, options=options,
                               version=version), 'string')

    def st_astext(self):
        "Render a geometry as WKT text."
        db = self.db
        return Expression(db, db._adapter.ST_ASTEXT, self, type='string')

    def st_x(self):
        "X coordinate of a point geometry."
        db = self.db
        return Expression(db, db._adapter.ST_X, self, type='string')

    def st_y(self):
        "Y coordinate of a point geometry."
        db = self.db
        return Expression(db, db._adapter.ST_Y, self, type='string')

    def st_distance(self, other):
        "Distance between two geometries."
        db = self.db
        return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double')

    def st_simplify(self, value):
        "Simplified geometry with the given tolerance."
        db = self.db
        return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)

    # GIS queries

    def st_contains(self, value):
        db = self.db
        return Query(db, db._adapter.ST_CONTAINS, self, value)

    def st_equals(self, value):
        db = self.db
        return Query(db, db._adapter.ST_EQUALS, self, value)

    def st_intersects(self, value):
        db = self.db
        return Query(db, db._adapter.ST_INTERSECTS, self, value)

    def st_overlaps(self, value):
        db = self.db
        return Query(db, db._adapter.ST_OVERLAPS, self, value)

    def st_touches(self, value):
        db = self.db
        return Query(db, db._adapter.ST_TOUCHES, self, value)

    def st_within(self, value):
        db = self.db
        return Query(db, db._adapter.ST_WITHIN, self, value)
9149
# for use in both Query and sortby


class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(
        self,
        type='string',
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        ):

        self.type = type
        self.native = native
        # encoder/decoder default to identity
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type

    def startswith(self, text=None):
        """True when the underlying web2py type string starts with *text*.

        BUGFIX: this previously called ``self.type.startswith(self, text)``,
        passing the SQLCustomType instance as the prefix argument, which
        always raised TypeError and therefore always returned False.
        """
        try:
            return self.type.startswith(text)
        except TypeError:
            # e.g. text=None, or self.type is not a string
            return False

    def __getslice__(self, a=0, b=100):
        # custom types have no SQL substring expansion
        return None

    def __getitem__(self, i):
        # custom types have no SQL indexing expansion
        return None

    def __str__(self):
        return self._class
9210
class FieldVirtual(object):
    """A computed (virtual) field: ``f(row)`` is evaluated per row at
    select time and is never stored in the database."""
    def __init__(self, name, f=None, ftype='string',label=None,table_name=None):
        # for backward compatibility: single-argument form FieldVirtual(f)
        # (the legacy fallback name is spelled 'unkown' - kept as-is since
        # existing code may compare against that exact string)
        (self.name, self.f) = (name, f) if f else ('unkown', name)
        self.type = ftype
        self.label = label or self.name.capitalize().replace('_',' ')
        self.represent = IDENTITY
        self.formatter = IDENTITY
        self.comment = None
        self.readable = True
        self.writable = False  # virtual fields cannot be written back
        self.requires = None
        self.widget = None
        self.tablename = table_name
        self.filter_out = None
9226
class FieldMethod(object):
    """A lazy field: ``f`` is exposed on each row as a callable and only
    executed when explicitly invoked (optionally through *handler*)."""

    def __init__(self, name, f=None, handler=None):
        # Backward-compatible single-argument form: FieldMethod(f)
        # (legacy fallback name 'unkown' preserved verbatim)
        if f:
            self.name = name
            self.f = f
        else:
            self.name = 'unkown'
            self.f = name
        self.handler = handler
9232
def list_represent(x, r=None):
    """Default representation for list-type field values: render the
    items as a comma-separated string (empty string for None/empty)."""
    return ', '.join(map(str, x or []))
9235
class Field(Expression):

    # convenience aliases so callers can write Field.Virtual / Field.Method
    Virtual = FieldVirtual
    Method = FieldMethod
    Lazy = FieldMethod # for backward compatibility

    # NOTE(review): this literal is NOT Field.__doc__ - it follows other
    # statements in the class body, so Python discards it.
    """
    an instance of this class represents a database field

    example::

        a = Field(name, 'string', length=32, default=None, required=False,
            requires=IS_NOT_EMPTY(), ondelete='CASCADE',
            notnull=False, unique=False,
            uploadfield=True, widget=None, label=None, comment=None,
            uploadfield=True, # True means store on disk,
                              # 'a_field_name' means store in this field in db
                              # False means file content will be discarded.
            writable=True, readable=True, update=None, authorize=None,
            autodelete=False, represent=None, uploadfolder=None,
            uploadseparate=False # upload to separate directories by uuid_keys
                                 # first 2 character and tablename.fieldname
                                 # False - old behavior
                                 # True - put uploaded file in
                                 #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
                                 #        directory)
            uploadfs=None     # a pyfilesystem where to store upload

    to be used as argument of DAL.define_table

    allowed field types:
    string, boolean, integer, double, text, blob,
    date, time, datetime, upload, password

    strings must have a length of Adapter.maxcharlength by default (512 or 255 for mysql)
    fields should have a default or they will be required in SQLFORMs
    the requires argument is used to validate the field input in SQLFORMs

    """
    def __init__(
        self,
        fieldname,
        type='string',
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete='CASCADE',
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=None,
        comment=None,
        writable=True,
        readable=True,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        uploadfs=None,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        custom_retrieve_file_properties=None,
        custom_delete=None,
        filter_in = None,
        filter_out = None,
        custom_qualifier = None,
        map_none = None,
        ):
        """Define a database field (see the class-level notes for the
        meaning of each argument).  Raises SyntaxError for invalid field
        names (non-strings, names clashing with Table attributes, names
        starting with '_', or Python keywords)."""
        self._db = self.db = None # both for backward compatibility
        self.op = None
        self.first = None
        self.second = None
        self.name = fieldname = cleanup(fieldname)
        if not isinstance(fieldname,str) or hasattr(Table,fieldname) or \
                fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname):
            raise SyntaxError('Field: invalid field name: %s' % fieldname)
        # passing a Table/Field as the type makes this a reference field
        self.type = type if not isinstance(type, (Table,Field)) else 'reference %s' % type
        self.length = length if not length is None else DEFAULTLENGTH.get(self.type,512)
        # an explicit `update` value doubles as the default when none given
        self.default = default if default!=DEFAULT else (update or None)
        self.required = required # is this field required
        self.ondelete = ondelete.upper() # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.uploadfs = uploadfs
        self.widget = widget
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        # list types get a sensible comma-separated default representation
        self.represent = list_represent if \
            represent==None and type in ('list:integer','list:string') else represent
        self.compute = compute
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        self.custom_retrieve_file_properties = custom_retrieve_file_properties
        self.custom_delete = custom_delete
        self.filter_in = filter_in
        self.filter_out = filter_out
        self.custom_qualifier = custom_qualifier
        self.label = label if label!=None else fieldname.replace('_',' ').title()
        self.requires = requires if requires!=None else []
        self.map_none = map_none
9350
9351 - def set_attributes(self,*args,**attributes):
9352 self.__dict__.update(*args,**attributes)
9353
9354 - def clone(self,point_self_references_to=False,**args):
9355 field = copy.copy(self) 9356 if point_self_references_to and \ 9357 field.type == 'reference %s'+field._tablename: 9358 field.type = 'reference %s' % point_self_references_to 9359 field.__dict__.update(args) 9360 return field
9361
    def store(self, file, filename=None, path=None):
        """
        Stores an uploaded file and returns the new encoded filename.

        The stored name is '<table>.<field>.<uuid>.<b16(original)>.<ext>',
        truncated to self.length.  Depending on self.uploadfield the payload
        goes to a blob field in another table, a pyfilesystem (uploadfs),
        or the regular filesystem.
        """
        # delegate entirely when a custom store callback is configured
        if self.custom_store:
            return self.custom_store(file,filename,path)
        if isinstance(file, cgi.FieldStorage):
            filename = filename or file.filename
            file = file.file
        elif not filename:
            filename = file.name
        # strip any client-supplied directory components
        filename = os.path.basename(filename.replace('/', os.sep)\
                                        .replace('\\', os.sep))
        m = REGEX_STORE_PATTERN.search(filename)
        extension = m and m.group('e') or 'txt'
        uuid_key = web2py_uuid().replace('-', '')[-16:]
        # original filename survives, base16-encoded, inside the stored name
        encoded_filename = base64.b16encode(filename).lower()
        newfilename = '%s.%s.%s.%s' % \
            (self._tablename, self.name, uuid_key, encoded_filename)
        # keep the extension even when truncating to the field length
        newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension
        self_uploadfield = self.uploadfield
        if isinstance(self_uploadfield,Field):
            # payload goes into a blob field of another table
            blob_uploadfield_name = self_uploadfield.uploadfield
            keys={self_uploadfield.name: newfilename,
                  blob_uploadfield_name: file.read()}
            self_uploadfield.table.insert(**keys)
        elif self_uploadfield == True:
            # payload goes to the filesystem (or uploadfs)
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
            else:
                raise RuntimeError(
                    "you must specify a Field(...,uploadfolder=...)")
            if self.uploadseparate:
                # shard uploads into per-field subfolders keyed by uuid prefix
                if self.uploadfs:
                    raise RuntimeError("not supported")
                path = pjoin(path,"%s.%s" %(self._tablename, self.name),
                             uuid_key[:2])
            if not exists(path):
                os.makedirs(path)
            pathfilename = pjoin(path, newfilename)
            if self.uploadfs:
                dest_file = self.uploadfs.open(newfilename, 'wb')
            else:
                dest_file = open(pathfilename, 'wb')
            try:
                shutil.copyfileobj(file, dest_file)
            except IOError:
                raise IOError(
                    'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename)
            dest_file.close()
        return newfilename
9414
    def retrieve(self, name, path=None, nameonly=False):
        """
        Fetches a previously stored upload back from storage and returns
        (filename, stream).

        if nameonly==True return (filename, fullfilename) instead of
        (filename, stream)
        """
        self_uploadfield = self.uploadfield
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        import http
        # authorization (and the DB-blob case) require loading the row first
        if self.authorize or isinstance(self_uploadfield, str):
            row = self.db(self == name).select().first()
            if not row:
                raise http.HTTP(404)
            if self.authorize and not self.authorize(row):
                raise http.HTTP(403)
        m = REGEX_UPLOAD_PATTERN.match(name)
        if not m or not self.isattachment:
            raise TypeError('Can\'t retrieve %s' % name)
        file_properties = self.retrieve_file_properties(name,path)
        filename = file_properties['filename']
        if isinstance(self_uploadfield, str): # ## if file is in DB
            stream = StringIO.StringIO(row[self_uploadfield] or '')
        elif isinstance(self_uploadfield,Field):
            # blob stored in another table; look it up by the stored name
            blob_uploadfield_name = self_uploadfield.uploadfield
            query = self_uploadfield == name
            data = self_uploadfield.table(query)[blob_uploadfield_name]
            stream = StringIO.StringIO(data)
        elif self.uploadfs:
            # ## if file is on pyfilesystem
            stream = self.uploadfs.open(name, 'rb')
        else:
            # ## if file is on regular filesystem
            # this is intentionally a string with the filename and not a stream
            # this propagates and allows stream_file_or_304_or_206 to be called
            fullname = pjoin(file_properties['path'],name)
            if nameonly:
                return (filename, fullname)
            stream = open(fullname,'rb')
        return (filename, stream)
9454
    def retrieve_file_properties(self, name, path=None):
        """
        Decodes a stored upload name back into a dict with keys
        'path' (None when the payload lives in the DB) and 'filename'
        (the original, base16-decoded filename).
        """
        self_uploadfield = self.uploadfield
        if self.custom_retrieve_file_properties:
            return self.custom_retrieve_file_properties(name, path)
        try:
            m = REGEX_UPLOAD_PATTERN.match(name)
            if not m or not self.isattachment:
                raise TypeError('Can\'t retrieve %s file properties' % name)
            # recover the original filename embedded in the stored name
            filename = base64.b16decode(m.group('name'), True)
            filename = REGEX_CLEANUP_FN.sub('_', filename)
        except (TypeError, AttributeError):
            # not a recognized upload name: use it verbatim
            filename = name
        if isinstance(self_uploadfield, str): # ## if file is in DB
            return dict(path=None,filename=filename)
        elif isinstance(self_uploadfield,Field):
            return dict(path=None,filename=filename)
        else:
            # ## if file is on filesystem
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            else:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
            if self.uploadseparate:
                # NOTE(review): if the regex did not match above, `m` is None
                # here and m.group would raise AttributeError -- appears to
                # assume uploadseparate names always match; confirm upstream
                t = m.group('table')
                f = m.group('field')
                u = m.group('uuidkey')
                path = pjoin(path,"%s.%s" % (t,f),u[:2])
            return dict(path=path,filename=filename)
9485 9486
9487 - def formatter(self, value):
9488 requires = self.requires 9489 if value is None or not requires: 9490 return value or self.map_none 9491 if not isinstance(requires, (list, tuple)): 9492 requires = [requires] 9493 elif isinstance(requires, tuple): 9494 requires = list(requires) 9495 else: 9496 requires = copy.copy(requires) 9497 requires.reverse() 9498 for item in requires: 9499 if hasattr(item, 'formatter'): 9500 value = item.formatter(value) 9501 return value
9502
9503 - def validate(self, value):
9504 if not self.requires or self.requires == DEFAULT: 9505 return ((value if value!=self.map_none else None), None) 9506 requires = self.requires 9507 if not isinstance(requires, (list, tuple)): 9508 requires = [requires] 9509 for validator in requires: 9510 (value, error) = validator(value) 9511 if error: 9512 return (value, error) 9513 return ((value if value!=self.map_none else None), None)
9514
9515 - def count(self, distinct=None):
9516 return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')
9517
    def as_dict(self, flat=False, sanitize=True, options=True):
        """
        Serializes the field definition to a dictionary.

        :param flat: when True, non-serializable attribute values are
            replaced by their string form so the result is JSON-friendly
        :param sanitize: drop validators whose name suggests secrets
            (CRYPT, IS_STRONG)
        :param options: when False, validator option lists (labels/theset)
            are blanked out
        """

        attrs = ('type', 'length', 'default', 'required',
                 'ondelete', 'notnull', 'unique', 'uploadfield',
                 'widget', 'label', 'comment', 'writable', 'readable',
                 'update', 'authorize', 'autodelete', 'represent',
                 'uploadfolder', 'uploadseparate', 'uploadfs',
                 'compute', 'custom_store', 'custom_retrieve',
                 'custom_retrieve_file_properties', 'custom_delete',
                 'filter_in', 'filter_out', 'custom_qualifier',
                 'map_none', 'name')

        SERIALIZABLE_TYPES = (int, long, basestring, dict, list,
                              float, tuple, bool, type(None))

        def flatten(obj):
            # recursively coerce `obj` into serializable primitives when
            # `flat` is requested; otherwise shallow-copy containers
            if flat:
                if isinstance(obj, flatten.__class__):
                    # functions are represented by their type string
                    return str(type(obj))
                elif isinstance(obj, type):
                    # classes become their dotted name when available
                    try:
                        return str(obj).split("'")[1]
                    except IndexError:
                        return str(obj)
                elif not isinstance(obj, SERIALIZABLE_TYPES):
                    return str(obj)
                elif isinstance(obj, dict):
                    newobj = dict()
                    for k, v in obj.items():
                        newobj[k] = flatten(v)
                    return newobj
                elif isinstance(obj, (list, tuple, set)):
                    return [flatten(v) for v in obj]
                else:
                    return obj
            elif isinstance(obj, (dict, set)):
                return obj.copy()
            else: return obj

        def filter_requires(t, r, options=True):
            # serialize one validator; returns None to hide sensitive ones
            if sanitize and any([keyword in str(t).upper() for
                                 keyword in ("CRYPT", "IS_STRONG")]):
                return None

            if not isinstance(r, dict):
                if options and hasattr(r, "options"):
                    # force lazy options to materialize before copying
                    if callable(r.options):
                        r.options()
                newr = r.__dict__.copy()
            else:
                newr = r.copy()

            # remove options if not required
            if not options and newr.has_key("labels"):
                [newr.update({key:None}) for key in
                 ("labels", "theset") if (key in newr)]

            for k, v in newr.items():
                if k == "other":
                    # nested validator (e.g. IS_EMPTY_OR): recurse
                    if isinstance(v, dict):
                        otype, other = v.popitem()
                    else:
                        otype = flatten(type(v))
                        other = v
                    newr[k] = {otype: filter_requires(otype, other,
                                                      options=options)}
                else:
                    newr[k] = flatten(v)
            return newr

        if isinstance(self.requires, (tuple, list, set)):
            requires = dict([(flatten(type(r)),
                              filter_requires(type(r), r,
                                              options=options)) for
                             r in self.requires])
        else:
            requires = {flatten(type(self.requires)):
                        filter_requires(type(self.requires),
                                        self.requires, options=options)}

        d = dict(colname="%s.%s" % (self.tablename, self.name),
                 requires=requires)
        d.update([(attr, flatten(getattr(self, attr))) for attr in attrs])
        return d
9603 - def as_xml(self, sanitize=True, options=True):
9604 if have_serializers: 9605 xml = serializers.xml 9606 else: 9607 raise ImportError("No xml serializers available") 9608 d = self.as_dict(flat=True, sanitize=sanitize, 9609 options=options) 9610 return xml(d)
9611
9612 - def as_json(self, sanitize=True, options=True):
9613 if have_serializers: 9614 json = serializers.json 9615 else: 9616 raise ImportError("No json serializers available") 9617 d = self.as_dict(flat=True, sanitize=sanitize, 9618 options=options) 9619 return json(d)
9620
9621 - def as_yaml(self, sanitize=True, options=True):
9622 if have_serializers: 9623 d = self.as_dict(flat=True, sanitize=sanitize, 9624 options=options) 9625 return serializers.yaml(d) 9626 else: 9627 raise ImportError("No YAML serializers available")
9628
9629 - def __nonzero__(self):
9630 return True
9631
    def __str__(self):
        # "<table>.<field>"; the bare except covers any failure while
        # resolving self.tablename on a field not yet bound to a table
        try:
            return '%s.%s' % (self.tablename, self.name)
        except:
            return '<no table>.%s' % self.name
9637
class Query(object):

    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters = False,
        **optional_args
        ):
        # `op` is an adapter operation (e.g. db._adapter.EQ) to be applied
        # to the `first`/`second` operands when the query is expanded to SQL
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters
        self.optional_args = optional_args

    def __repr__(self):
        return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)

    def __str__(self):
        # adapter-specific (SQL) textual form of this query
        return self.db._adapter.expand(self)

    def __and__(self, other):
        # q1 & q2 -> AND query
        return Query(self.db,self.db._adapter.AND,self,other)

    __rand__ = __and__

    def __or__(self, other):
        # q1 | q2 -> OR query
        return Query(self.db,self.db._adapter.OR,self,other)

    __ror__ = __or__

    def __invert__(self):
        # ~~q collapses back to q instead of nesting two NOTs
        if self.op==self.db._adapter.NOT:
            return self.first
        return Query(self.db,self.db._adapter.NOT,self)

    def __eq__(self, other):
        # structural equality via the expanded representation
        return repr(self) == repr(other)

    def __ne__(self, other):
        return not (self == other)

    def case(self,t=1,f=0):
        # SQL CASE WHEN <self> THEN t ELSE f END
        return self.db._adapter.CASE(self,t,f)

    def as_dict(self, flat=False, sanitize=True):
        """Experimental stuff

        This allows to return a plain dictionary with the basic
        query representation. Can be used with json/xml services
        for client-side db I/O

        Example:
        >>> q = db.auth_user.id != 0
        >>> q.as_dict(flat=True)
        {"op": "NE", "first":{"tablename": "auth_user",
                              "fieldname": "id"},
                     "second":0}
        """

        SERIALIZABLE_TYPES = (tuple, dict, list, int, long, float,
                              basestring, type(None), bool)
        def loop(d):
            # recursively serialize the query tree; only first/second/op
            # and plain-serializable attributes survive
            newd = dict()
            for k, v in d.items():
                if k in ("first", "second"):
                    if isinstance(v, self.__class__):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, Field):
                        newd[k] = {"tablename": v._tablename,
                                   "fieldname": v.name}
                    elif isinstance(v, Expression):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, SERIALIZABLE_TYPES):
                        newd[k] = v
                    elif isinstance(v, (datetime.date,
                                        datetime.time,
                                        datetime.datetime)):
                        # dates are not in SERIALIZABLE_TYPES: stringify
                        newd[k] = unicode(v)
                elif k == "op":
                    if callable(v):
                        # adapter method: keep its name only
                        newd[k] = v.__name__
                    elif isinstance(v, basestring):
                        newd[k] = v
                    else: pass # not callable or string
                elif isinstance(v, SERIALIZABLE_TYPES):
                    if isinstance(v, dict):
                        newd[k] = loop(v)
                    else: newd[k] = v
            return newd

        if flat:
            return loop(self.__dict__)
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        # XML rendering of the flat dict form
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        # JSON rendering of the flat dict form
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)
9765
def xorify(orderby):
    """Folds a sequence of orderby expressions into one using |; None if empty."""
    if not orderby:
        return None
    combined = orderby[0]
    for expression in orderby[1:]:
        combined = combined | expression
    return combined
9773
def use_common_filters(query):
    """
    True when `query` is a filter-aware object that has not opted out of
    common filters; preserves the short-circuit value for falsy input.
    """
    if not query:
        return query
    if not hasattr(query, 'ignore_common_filters'):
        return False
    return not query.ignore_common_filters
9777
class Set(object):

    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
       set = db(db.users.name=='Max')
    you can:
       set.update(db.users.name='Massimo')
       set.delete() # all elements in the set
       set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
       subset = set(db.users.id<5)
    """

    def __init__(self, db, query, ignore_common_filters = None):
        self.db = db
        self._db = db # for backward compatibility
        self.dquery = None

        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)

        # copy the query when the requested ignore_common_filters flag
        # differs from the query's current effective setting
        if not ignore_common_filters is None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query

    def __repr__(self):
        return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)

    def __call__(self, query, ignore_common_filters=False):
        """Returns a subset: the current query AND-ed with `query`."""
        if query is None:
            return self
        elif isinstance(query,Table):
            # a bare table means "all rows with a valid id"
            query = self.db._adapter.id_query(query)
        elif isinstance(query,str):
            # raw SQL snippet
            query = Expression(self.db,query)
        elif isinstance(query,Field):
            # a bare field means "field is not NULL"
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)

    def _count(self,distinct=None):
        # SQL text of the COUNT, without executing it
        return self.db._adapter._count(self.query,distinct)

    def _select(self, *fields, **attributes):
        # SQL text of the SELECT, without executing it
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query,fields,attributes)

    def _delete(self):
        # SQL text of the DELETE, without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        return db._adapter._delete(tablename,self.query)

    def _update(self, **update_fields):
        # SQL text of the UPDATE, without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        fields = db[tablename]._listify(update_fields,update=True)
        return db._adapter._update(tablename,self.query,fields)

    def as_dict(self, flat=False, sanitize=True):
        # serializable form; when not sanitized it includes db identity info
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                uri, dbname, uid = (self.db._dbname, str(self.db),
                                    self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec,
                       "name": dbname, "uri": uri}
            return d
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def parse(self, dquery):
        "Experimental: Turn a dictionary into a Query object"
        self.dquery = dquery
        return self.build(self.dquery)

    def build(self, d):
        "Experimental: see .parse()"
        op, first, second = (d["op"], d["first"],
                             d.get("second", None))
        left = right = built = None

        if op in ("AND", "OR"):
            # logical combinators: both operands must themselves be dicts
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else: built = self.build(first) | self.build(second)

        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)
        else:
            # normal operation (GT, EQ, LT, ...)
            for k, v in {"left": first, "right": second}.items():
                # operands may be nested queries or field references
                if isinstance(v, dict) and v.get("op"):
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left": left = v
                else: right = v

            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)

            if op == "EQ": built = left == right
            elif op == "NE": built = left != right
            elif op == "GT": built = left > right
            elif op == "GE": built = left >= right
            elif op == "LT": built = left < right
            elif op == "LE": built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                built = Expression(self.db, opm)
            elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
                        "COALESCE_ZERO", "RAW", "INVERT"):
                built = Expression(self.db, opm, left)
            elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
                        "REGEXP", "LIKE", "ILIKE", "STARTSWITH",
                        "ENDSWITH", "ADD", "SUB", "MUL", "DIV",
                        "MOD", "AS", "ON", "COMMA", "NOT_NULL",
                        "COALESCE", "CONTAINS", "BELONGS"):
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right): built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)

        return built

    def isempty(self):
        # cheap emptiness test: select at most one row
        return not self.select(limitby=(0,1), orderby_on_limitby=False)

    def count(self,distinct=None, cache=None):
        db = self.db
        if cache:
            # cache key is the generated SQL (hashed when too long)
            cache_model, time_expire = cache
            sql = self._count(distinct=distinct)
            key = db._uri + '/' + sql
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                (lambda self=self,distinct=distinct: \
                     db._adapter.count(self.query,distinct)),
                time_expire)
        return db._adapter.count(self.query,distinct)

    def select(self, *fields, **attributes):
        """Executes the SELECT and returns a Rows object."""
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query,fields,attributes)

    def nested_select(self,*fields,**attributes):
        # wraps the SELECT SQL so it can be embedded in another query
        return Expression(self.db,self._select(*fields,**attributes))

    def delete(self):
        """Deletes all records in the set, honoring _before/_after_delete."""
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        # any before-delete callback returning truthy aborts the delete
        if any(f(self) for f in table._before_delete): return 0
        ret = db._adapter.delete(tablename,self.query)
        ret and [f(self) for f in table._after_delete]
        return ret

    def update(self, **update_fields):
        """Updates all records in the set, honoring _before/_after_update."""
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        table._attempt_upload(update_fields)
        # any before-update callback returning truthy aborts the update
        if any(f(self,update_fields) for f in table._before_update):
            return 0
        fields = table._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError("No fields to update")
        ret = db._adapter.update(tablename,self.query,fields)
        ret and [f(self,update_fields) for f in table._after_update]
        return ret

    def update_naive(self, **update_fields):
        """
        same as update but does not call table._before_update and _after_update
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        fields = table._listify(update_fields,update=True)
        if not fields: raise SyntaxError("No fields to update")
        ret = self.db._adapter.update(tablename,self.query,fields)
        return ret

    def validate_and_update(self, **update_fields):
        # validates each value through its Field.validate before updating;
        # returns a Row with .errors and .updated (None when validation fails)
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key,value in update_fields.iteritems():
            value,error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            response.updated = None
        else:
            if not any(f(self,new_fields) for f in table._before_update):
                fields = table._listify(new_fields,update=True)
                if not fields: raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename,self.query,fields)
                ret and [f(self,new_fields) for f in table._after_update]
            else:
                ret = 0
            response.updated = ret
        return response

    def delete_uploaded_files(self, upload_fields=None):
        # removes files referenced by autodelete upload fields of the set's
        # records (skipping values being kept via upload_fields)
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                  and table[f].uploadfield == True
                  and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                if upload_fields and oldname == upload_fields[fieldname]:
                    # the new value keeps the same file: do not delete it
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        # sharded layout: <folder>/<table>.<field>/<uuid[:2]>
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False
10067
class RecordUpdater(object):
    """
    Callable bound to one record (by table and id); calling it updates
    that record and the cached column set, returning the updated colset.
    """

    def __init__(self, colset, table, id):
        self.colset = colset
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, **fields):
        db = self.db
        table = db[self.tablename]
        # no explicit fields: re-save the cached column values
        newfields = fields or dict(self.colset)
        # drop anything that is not a real, writable column
        for fieldname in newfields.keys():
            if not fieldname in table.fields or table[fieldname].type == 'id':
                del newfields[fieldname]
        table._db(table._id == self.id,
                  ignore_common_filters=True).update(**newfields)
        self.colset.update(newfields)
        return self.colset
10083
class RecordDeleter(object):
    """Callable bound to one record (by table and id); calling it deletes it."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self):
        table = self.db[self.tablename]
        return self.db(table._id == self.id).delete()
10089
class LazySet(object):
    """
    A Set built on demand from a (field == id) equality.

    Only the coordinates (db, tablename, fieldname, id) are stored; the
    underlying Set is materialized freshly for every operation.
    """

    def __init__(self, field, id):
        self.db = field.db
        self.tablename = field._tablename
        self.fieldname = field.name
        self.id = id

    def _getset(self):
        # materialize the Set from the stored coordinates
        column = self.db[self.tablename][self.fieldname]
        return Set(self.db, column == self.id)

    def __repr__(self):
        return repr(self._getset())

    def __call__(self, query, ignore_common_filters=False):
        return self._getset()(query, ignore_common_filters)

    def _count(self, distinct=None):
        return self._getset()._count(distinct)

    def _select(self, *fields, **attributes):
        return self._getset()._select(*fields, **attributes)

    def _delete(self):
        return self._getset()._delete()

    def _update(self, **update_fields):
        return self._getset()._update(**update_fields)

    def isempty(self):
        return self._getset().isempty()

    def count(self, distinct=None, cache=None):
        return self._getset().count(distinct, cache)

    def select(self, *fields, **attributes):
        return self._getset().select(*fields, **attributes)

    def nested_select(self, *fields, **attributes):
        return self._getset().nested_select(*fields, **attributes)

    def delete(self):
        return self._getset().delete()

    def update(self, **update_fields):
        return self._getset().update(**update_fields)

    def update_naive(self, **update_fields):
        return self._getset().update_naive(**update_fields)

    def validate_and_update(self, **update_fields):
        return self._getset().validate_and_update(**update_fields)

    def delete_uploaded_files(self, upload_fields=None):
        return self._getset().delete_uploaded_files(upload_fields)
10127
class VirtualCommand(object):
    """Binds a lazy virtual-field method to its row so it can be called later."""

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        # invoke the stored method with the bound row as first argument
        return self.method(self.row, *args, **kwargs)
10134
def lazy_virtualfield(f):
    """Decorator marking a virtual-field method as lazy (called on demand)."""
    setattr(f, '__lazy__', True)
    return f
10138
class Rows(object):

    """
    A wrapper for the return value of a select. It basically represents a table.
    It has an iterator and each row is represented as a dictionary.
    """

    # ## TODO: this class still needs some work to care for ID/OID

    def __init__(
        self,
        db=None,
        records=None,
        colnames=None,
        compact=True,
        rawrows=None
        ):
        """
        :param db: the DAL instance the rows came from
        :param records: list of Row records (a fresh empty list by default)
        :param colnames: list of column names (a fresh empty list by default)
        :param compact: collapse single-table rows on item access
        :param rawrows: the raw driver response, kept as self.response
        """
        self.db = db
        # BUGFIX: the original used mutable default arguments ([]), so every
        # Rows built without explicit records/colnames shared the same lists
        self.records = [] if records is None else records
        self.colnames = [] if colnames is None else colnames
        self.compact = compact
        self.response = rawrows
10161
10162 - def __repr__(self):
10163 return '<Rows (%s)>' % len(self.records)
10164
    def setvirtualfields(self,**keyed_virtualfields):
        """
        Attaches virtual fields to every row, per table.

        db.define_table('x',Field('number','integer'))
        if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]

        from gluon.dal import lazy_virtualfield

        class MyVirtualFields(object):
            # normal virtual field (backward compatible, discouraged)
            def normal_shift(self): return self.x.number+1
            # lazy virtual field (because of @staticmethod)
            @lazy_virtualfield
            def lazy_shift(instance,row,delta=4): return row.x.number+delta
        db.x.virtualfields.append(MyVirtualFields())

        for row in db(db.x).select():
            print row.number, row.normal_shift, row.lazy_shift(delta=7)
        """
        if not keyed_virtualfields:
            return self
        for row in self.records:
            for (tablename,virtualfields) in keyed_virtualfields.iteritems():
                attributes = dir(virtualfields)
                # each row gets a per-table box holding the computed values
                if not tablename in row:
                    box = row[tablename] = Row()
                else:
                    box = row[tablename]
                updated = False
                for attribute in attributes:
                    if attribute[0] != '_':
                        method = getattr(virtualfields,attribute)
                        if hasattr(method,'__lazy__'):
                            # lazy: store a callable bound to this row
                            box[attribute]=VirtualCommand(method,row)
                        elif type(method)==types.MethodType:
                            # legacy style: the provider instance is primed
                            # with the row's data once, then methods are called
                            if not updated:
                                virtualfields.__dict__.update(row)
                                updated = True
                            box[attribute]=method()
        return self
10204
10205 - def __and__(self,other):
10206 if self.colnames!=other.colnames: 10207 raise Exception('Cannot & incompatible Rows objects') 10208 records = self.records+other.records 10209 return Rows(self.db,records,self.colnames)
10210
10211 - def __or__(self,other):
10212 if self.colnames!=other.colnames: 10213 raise Exception('Cannot | incompatible Rows objects') 10214 records = self.records 10215 records += [record for record in other.records \ 10216 if not record in records] 10217 return Rows(self.db,records,self.colnames)
10218
10219 - def __nonzero__(self):
10220 if len(self.records): 10221 return 1 10222 return 0
10223
10224 - def __len__(self):
10225 return len(self.records)
10226
10227 - def __getslice__(self, a, b):
10228 return Rows(self.db,self.records[a:b],self.colnames)
10229
10230 - def __getitem__(self, i):
10231 row = self.records[i] 10232 keys = row.keys() 10233 if self.compact and len(keys) == 1 and keys[0] != '_extra': 10234 return row[row.keys()[0]] 10235 return row
10236
10237 - def __iter__(self):
10238 """ 10239 iterator over records 10240 """ 10241 10242 for i in xrange(len(self)): 10243 yield self[i]
10244
10245 - def __str__(self):
10246 """ 10247 serializes the table into a csv file 10248 """ 10249 10250 s = StringIO.StringIO() 10251 self.export_to_csv_file(s) 10252 return s.getvalue()
10253
10254 - def first(self):
10255 if not self.records: 10256 return None 10257 return self[0]
10258
10259 - def last(self):
10260 if not self.records: 10261 return None 10262 return self[-1]
10263
10264 - def find(self,f,limitby=None):
10265 """ 10266 returns a new Rows object, a subset of the original object, 10267 filtered by the function f 10268 """ 10269 if not self: 10270 return Rows(self.db, [], self.colnames) 10271 records = [] 10272 if limitby: 10273 a,b = limitby 10274 else: 10275 a,b = 0,len(self) 10276 k = 0 10277 for row in self: 10278 if f(row): 10279 if a<=k: records.append(row) 10280 k += 1 10281 if k==b: break 10282 return Rows(self.db, records, self.colnames)
10283
10284 - def exclude(self, f):
10285 """ 10286 removes elements from the calling Rows object, filtered by the function f, 10287 and returns a new Rows object containing the removed elements 10288 """ 10289 if not self.records: 10290 return Rows(self.db, [], self.colnames) 10291 removed = [] 10292 i=0 10293 while i<len(self): 10294 row = self[i] 10295 if f(row): 10296 removed.append(self.records[i]) 10297 del self.records[i] 10298 else: 10299 i += 1 10300 return Rows(self.db, removed, self.colnames)
10301
10302 - def sort(self, f, reverse=False):
10303 """ 10304 returns a list of sorted elements (not sorted in place) 10305 """ 10306 rows = Rows(self.db,[],self.colnames,compact=False) 10307 rows.records = sorted(self,key=f,reverse=reverse) 10308 return rows
10309 10310
10311 - def group_by_value(self, field):
10312 """ 10313 regroups the rows, by one of the fields 10314 """ 10315 if not self.records: 10316 return {} 10317 key = str(field) 10318 grouped_row_group = dict() 10319 10320 for row in self: 10321 value = row[key] 10322 if not value in grouped_row_group: 10323 grouped_row_group[value] = [row] 10324 else: 10325 grouped_row_group[value].append(row) 10326 return grouped_row_group
10327
10328 - def as_list(self, 10329 compact=True, 10330 storage_to_dict=True, 10331 datetime_to_str=True, 10332 custom_types=None):
10333 """ 10334 returns the data as a list or dictionary. 10335 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 10336 :param datetime_to_str: convert datetime fields as strings (default True) 10337 """ 10338 (oc, self.compact) = (self.compact, compact) 10339 if storage_to_dict: 10340 items = [item.as_dict(datetime_to_str, custom_types) for item in self] 10341 else: 10342 items = [item for item in self] 10343 self.compact = compact 10344 return items
10345 10346
    def as_dict(self,
                key='id',
                compact=True,
                storage_to_dict=True,
                datetime_to_str=True,
                custom_types=None):
        """
        returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)

        :param key: the name of the field to be used as dict key, normally the id
        :param compact: ? (default True)
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default True)
        """

        # test for multiple rows: a joined select nests Row-like objects
        # inside each row, so a plain field name cannot key the result
        multi = False
        f = self.first()
        if f:
            multi = any([isinstance(v, f.__class__) for v in f.values()])
            if (not "." in key) and multi:
                # No key provided, default to int indices
                def new_key():
                    i = 0
                    while True:
                        yield i
                        i += 1
                key_generator = new_key()
                # Python 2 generator protocol; replaces `key` with a callable
                key = lambda r: key_generator.next()

        rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types)
        if isinstance(key,str) and key.count('.')==1:
            # 'table.field' style key: index into the nested join result
            (table, field) = key.split('.')
            return dict([(r[table][field],r) for r in rows])
        elif isinstance(key,str):
            return dict([(r[key],r) for r in rows])
        else:
            # key is a callable: user supplied, or the int generator above
            return dict([(key(r),r) for r in rows])
    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)
            This will only work when exporting rows objects!!!!
            DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        write_colnames = kwargs.get('write_colnames', True)
        # a proper csv starting with the column names
        if write_colnames:
            writer.writerow(colnames)

        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value, Reference):
                return long(value)
            elif hasattr(value, 'isoformat'):
                # dates/times: keep 'YYYY-MM-DD HH:MM:SS' and drop microseconds
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list, tuple)): # for type='list:..'
                return bar_encode(value)
            return value

        # one csv line per row; columns not matching 'table.field' come
        # from the row's _extra storage (e.g. computed expressions)
        for record in self:
            row = []
            for col in colnames:
                if not REGEX_TABLE_DOT_FIELD.match(col):
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    # joined selects nest per-table sub-rows; plain selects don't
                    if isinstance(record.get(t, None), (Row, dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type == 'blob' and not value is None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)
10449 - def xml(self,strict=False,row_name='row',rows_name='rows'):
10450 """ 10451 serializes the table using sqlhtml.SQLTABLE (if present) 10452 """ 10453 10454 if strict: 10455 ncols = len(self.colnames) 10456 return '<%s>\n%s\n</%s>' % (rows_name, 10457 '\n'.join(row.as_xml(row_name=row_name, 10458 colnames=self.colnames) for 10459 row in self), rows_name) 10460 10461 import sqlhtml 10462 return sqlhtml.SQLTABLE(self).xml()
10463
10464 - def as_xml(self,row_name='row',rows_name='rows'):
10465 return self.xml(strict=True, row_name=row_name, rows_name=rows_name)
10466
10467 - def as_json(self, mode='object', default=None):
10468 """ 10469 serializes the table to a JSON list of objects 10470 """ 10471 10472 items = [record.as_json(mode=mode, default=default, 10473 serialize=False, 10474 colnames=self.colnames) for 10475 record in self] 10476 10477 if have_serializers: 10478 return serializers.json(items, 10479 default=default or 10480 serializers.custom_json) 10481 elif simplejson: 10482 return simplejson.dumps(items) 10483 else: 10484 raise RuntimeError("missing simplejson")
10485 10486 # for consistent naming yet backwards compatible 10487 as_csv = __str__ 10488 json = as_json 10489
################################################################################
# dummy function used to define some doctests
################################################################################

def test_all():
    # NOTE(review): the docstring below IS the test suite; it is executed by
    # doctest.testmod() when this module is run as a script, against
    # sqlite://test.db or a DB URI passed as sys.argv[1].
    """

    >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True,notnull=True),\
              Field('jsonf', 'json'),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

   Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                       uploadf=None, integerf=5, doublef=3.14,\
                       jsonf={"j": True},\
                       datef=datetime.date(2001, 1, 1),\
                       timef=datetime.time(12, 30, 15),\
                       datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22')
    >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name=\"Max\")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
              Field('name'),\
              Field('birth','date'),\
              Field('owner',db.person),\
              migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
                            migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
                            migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
            Field('author_id', db.author),\
            Field('paper_id', db.paper),\
            migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
10697 ################################################################################ 10698 # deprecated since the new DAL; here only for backward compatibility 10699 ################################################################################ 10700 10701 SQLField = Field 10702 SQLTable = Table 10703 SQLXorable = Expression 10704 SQLQuery = Query 10705 SQLSet = Set 10706 SQLRows = Rows 10707 SQLStorage = Row 10708 SQLDB = DAL 10709 GQLDB = DAL 10710 DAL.Field = Field # was necessary in gluon/globals.py session.connect 10711 DAL.Table = Table # was necessary in gluon/globals.py session.connect
################################################################################
# Geodal utils
################################################################################

def geoPoint(x, y):
    """Return the WKT (Well-Known Text) representation of the 2D point (x, y)."""
    wkt = "POINT (%f %f)" % (x, y)
    return wkt
def geoLine(*line):
    """Return the WKT LINESTRING for the given sequence of (x, y) pairs."""
    coords = ','.join(["%f %f" % pair for pair in line])
    return "LINESTRING (%s)" % coords
def geoPolygon(*line):
    """Return the WKT POLYGON (single outer ring) for the given (x, y) pairs."""
    ring = ','.join(["%f %f" % pair for pair in line])
    return "POLYGON ((%s))" % ring
10725 10726 ################################################################################ 10727 # run tests 10728 ################################################################################ 10729 10730 if __name__ == '__main__': 10731 import doctest 10732 doctest.testmod() 10733